From f99ef92c9024b1dfc604646c8c0912b963c11523 Mon Sep 17 00:00:00 2001 From: Ben Swartzlander Date: Thu, 8 Aug 2013 10:34:06 -0400 Subject: [PATCH] Initialize from cinder --- CONTRIBUTING.md | 12 + HACKING.rst | 275 + LICENSE | 176 + MANIFEST.in | 6 + README.rst | 21 + babel.cfg | 2 + bin/cinder-all | 70 + bin/cinder-api | 52 + bin/cinder-backup | 50 + bin/cinder-clear-rabbit-queues | 76 + bin/cinder-manage | 820 +++ bin/cinder-rootwrap | 128 + bin/cinder-rpc-zmq-receiver | 53 + bin/cinder-scheduler | 50 + bin/cinder-share | 60 + bin/cinder-volume | 61 + bin/cinder-volume-usage-audit | 101 + cinder/__init__.py | 32 + cinder/api/__init__.py | 32 + cinder/api/auth.py | 36 + cinder/api/common.py | 314 + cinder/api/contrib/__init__.py | 39 + cinder/api/contrib/admin_actions.py | 174 + cinder/api/contrib/backups.py | 278 + .../contrib/extended_snapshot_attributes.py | 125 + cinder/api/contrib/hosts.py | 265 + cinder/api/contrib/image_create.py | 31 + cinder/api/contrib/quota_classes.py | 103 + cinder/api/contrib/quotas.py | 125 + cinder/api/contrib/services.py | 139 + cinder/api/contrib/share_actions.py | 80 + cinder/api/contrib/share_snapshots.py | 181 + cinder/api/contrib/shares.py | 215 + cinder/api/contrib/types_extra_specs.py | 162 + cinder/api/contrib/types_manage.py | 122 + cinder/api/contrib/volume_actions.py | 204 + cinder/api/contrib/volume_host_attribute.py | 93 + cinder/api/contrib/volume_image_metadata.py | 106 + cinder/api/contrib/volume_tenant_attribute.py | 91 + cinder/api/extensions.py | 407 ++ cinder/api/middleware/__init__.py | 16 + cinder/api/middleware/auth.py | 140 + cinder/api/middleware/fault.py | 75 + cinder/api/middleware/sizelimit.py | 83 + cinder/api/openstack/__init__.py | 130 + cinder/api/openstack/urlmap.py | 27 + cinder/api/openstack/volume/__init__.py | 27 + cinder/api/openstack/volume/versions.py | 29 + cinder/api/openstack/wsgi.py | 1144 ++++ cinder/api/schemas/atom-link.rng | 141 + cinder/api/schemas/v1.1/extension.rng | 11 + 
cinder/api/schemas/v1.1/extensions.rng | 6 + cinder/api/schemas/v1.1/limits.rng | 28 + cinder/api/schemas/v1.1/metadata.rng | 9 + cinder/api/sizelimit.py | 28 + cinder/api/urlmap.py | 297 + cinder/api/v1/__init__.py | 0 cinder/api/v1/limits.py | 482 ++ cinder/api/v1/router.py | 95 + cinder/api/v1/snapshot_metadata.py | 164 + cinder/api/v1/snapshots.py | 234 + cinder/api/v1/types.py | 80 + cinder/api/v1/volume_metadata.py | 164 + cinder/api/v1/volumes.py | 421 ++ cinder/api/v2/__init__.py | 0 cinder/api/v2/limits.py | 482 ++ cinder/api/v2/router.py | 70 + cinder/api/v2/snapshot_metadata.py | 164 + cinder/api/v2/snapshots.py | 257 + cinder/api/v2/types.py | 80 + cinder/api/v2/views/__init__.py | 16 + cinder/api/v2/views/volumes.py | 122 + cinder/api/v2/volumes.py | 362 + cinder/api/versions.py | 282 + cinder/api/views/__init__.py | 16 + cinder/api/views/backups.py | 90 + cinder/api/views/limits.py | 100 + cinder/api/views/share_snapshots.py | 74 + cinder/api/views/shares.py | 74 + cinder/api/views/types.py | 34 + cinder/api/views/versions.py | 82 + cinder/api/xmlutil.py | 911 +++ cinder/backup/__init__.py | 23 + cinder/backup/api.py | 171 + cinder/backup/manager.py | 264 + cinder/backup/rpcapi.py | 73 + cinder/backup/services/__init__.py | 14 + cinder/backup/services/swift.py | 384 ++ cinder/brick/__init__.py | 16 + cinder/brick/iscsi/__init__.py | 16 + cinder/brick/iscsi/iscsi.py | 468 ++ cinder/brick/local_dev/__init__.py | 16 + cinder/brick/local_dev/lvm.py | 368 + cinder/common/__init__.py | 15 + cinder/common/sqlalchemyutils.py | 128 + cinder/compute/__init__.py | 0 cinder/compute/aggregate_states.py | 44 + cinder/context.py | 155 + cinder/db/__init__.py | 23 + cinder/db/api.py | 883 +++ cinder/db/base.py | 40 + cinder/db/migration.py | 38 + cinder/db/sqlalchemy/__init__.py | 17 + cinder/db/sqlalchemy/api.py | 2243 +++++++ cinder/db/sqlalchemy/migrate_repo/README | 4 + cinder/db/sqlalchemy/migrate_repo/__init__.py | 0 cinder/db/sqlalchemy/migrate_repo/manage.py 
| 4 + cinder/db/sqlalchemy/migrate_repo/migrate.cfg | 20 + .../migrate_repo/versions/001_cinder_init.py | 272 + .../migrate_repo/versions/002_quota_class.py | 150 + .../versions/003_glance_metadata.py | 78 + .../versions/004_volume_type_to_uuid.py | 155 + .../versions/005_add_source_volume_column.py | 41 + .../versions/005_sqlite_downgrade.sql | 124 + .../006_snapshots_add_provider_location.py | 36 + .../versions/007_add_volume_snapshot_fk.py | 41 + .../versions/007_sqlite_downgrade.sql | 32 + .../migrate_repo/versions/008_add_backup.py | 95 + .../009_add_snapshot_metadata_table.py | 60 + .../versions/010_add_share_tables.py | 79 + .../versions/011_add_share_snapshot_table.py | 69 + .../migrate_repo/versions/__init__.py | 0 cinder/db/sqlalchemy/migration.py | 118 + cinder/db/sqlalchemy/models.py | 529 ++ cinder/db/sqlalchemy/session.py | 151 + cinder/exception.py | 614 ++ cinder/flags.py | 261 + cinder/image/__init__.py | 16 + cinder/image/glance.py | 460 ++ cinder/image/image_utils.py | 283 + cinder/locale/bg_BG/LC_MESSAGES/cinder.po | 5574 +++++++++++++++ cinder/locale/bs/LC_MESSAGES/cinder.po | 5581 +++++++++++++++ cinder/locale/cinder.pot | 5573 +++++++++++++++ cinder/locale/cs/LC_MESSAGES/cinder.po | 5631 ++++++++++++++++ cinder/locale/da/LC_MESSAGES/cinder.po | 5573 +++++++++++++++ cinder/locale/de/LC_MESSAGES/cinder.po | 5584 +++++++++++++++ cinder/locale/en_AU/LC_MESSAGES/cinder.po | 5605 ++++++++++++++++ cinder/locale/en_GB/LC_MESSAGES/cinder.po | 5605 ++++++++++++++++ cinder/locale/en_US/LC_MESSAGES/cinder.po | 5977 +++++++++++++++++ cinder/locale/es/LC_MESSAGES/cinder.po | 5614 ++++++++++++++++ cinder/locale/fi_FI/LC_MESSAGES/cinder.po | 5574 +++++++++++++++ cinder/locale/fr/LC_MESSAGES/cinder.po | 5620 ++++++++++++++++ cinder/locale/it/LC_MESSAGES/cinder.po | 5605 ++++++++++++++++ cinder/locale/ja/LC_MESSAGES/cinder.po | 5606 ++++++++++++++++ cinder/locale/ko/LC_MESSAGES/cinder.po | 5593 +++++++++++++++ cinder/locale/ko_KR/LC_MESSAGES/cinder.po | 5574 
+++++++++++++++ cinder/locale/pt_BR/LC_MESSAGES/cinder.po | 5604 ++++++++++++++++ cinder/locale/ru/LC_MESSAGES/cinder.po | 5660 ++++++++++++++++ cinder/locale/tl/LC_MESSAGES/cinder.po | 5575 +++++++++++++++ cinder/locale/tr/LC_MESSAGES/cinder.po | 5573 +++++++++++++++ cinder/locale/uk/LC_MESSAGES/cinder.po | 5576 +++++++++++++++ cinder/locale/vi_VN/LC_MESSAGES/cinder.po | 5574 +++++++++++++++ cinder/locale/zh_CN/LC_MESSAGES/cinder.po | 5657 ++++++++++++++++ cinder/locale/zh_TW/LC_MESSAGES/cinder.po | 5598 +++++++++++++++ cinder/manager.py | 221 + cinder/openstack/__init__.py | 15 + cinder/openstack/common/README | 13 + cinder/openstack/common/__init__.py | 15 + cinder/openstack/common/context.py | 81 + cinder/openstack/common/eventlet_backdoor.py | 87 + cinder/openstack/common/exception.py | 142 + cinder/openstack/common/excutils.py | 51 + cinder/openstack/common/fileutils.py | 35 + cinder/openstack/common/gettextutils.py | 50 + cinder/openstack/common/importutils.py | 67 + cinder/openstack/common/jsonutils.py | 167 + cinder/openstack/common/local.py | 48 + cinder/openstack/common/lockutils.py | 278 + cinder/openstack/common/log.py | 540 ++ cinder/openstack/common/loopingcall.py | 147 + cinder/openstack/common/network_utils.py | 68 + cinder/openstack/common/notifier/__init__.py | 14 + cinder/openstack/common/notifier/api.py | 182 + .../openstack/common/notifier/log_notifier.py | 35 + .../common/notifier/no_op_notifier.py | 19 + .../common/notifier/rabbit_notifier.py | 29 + .../openstack/common/notifier/rpc_notifier.py | 46 + .../common/notifier/rpc_notifier2.py | 52 + .../common/notifier/test_notifier.py | 22 + cinder/openstack/common/policy.py | 301 + cinder/openstack/common/processutils.py | 181 + cinder/openstack/common/rootwrap/__init__.py | 16 + cinder/openstack/common/rootwrap/cmd.py | 128 + cinder/openstack/common/rootwrap/filters.py | 226 + cinder/openstack/common/rootwrap/wrapper.py | 149 + cinder/openstack/common/rpc/__init__.py | 307 + 
cinder/openstack/common/rpc/amqp.py | 677 ++ cinder/openstack/common/rpc/common.py | 508 ++ cinder/openstack/common/rpc/dispatcher.py | 153 + cinder/openstack/common/rpc/impl_fake.py | 195 + cinder/openstack/common/rpc/impl_kombu.py | 838 +++ cinder/openstack/common/rpc/impl_qpid.py | 649 ++ cinder/openstack/common/rpc/impl_zmq.py | 851 +++ cinder/openstack/common/rpc/matchmaker.py | 425 ++ .../openstack/common/rpc/matchmaker_redis.py | 149 + cinder/openstack/common/rpc/proxy.py | 179 + cinder/openstack/common/rpc/service.py | 75 + cinder/openstack/common/rpc/zmq_receiver.py | 41 + cinder/openstack/common/scheduler/__init__.py | 0 cinder/openstack/common/scheduler/filter.py | 71 + .../common/scheduler/filters/__init__.py | 41 + .../filters/availability_zone_filter.py | 30 + .../scheduler/filters/capabilities_filter.py | 63 + .../scheduler/filters/extra_specs_ops.py | 72 + .../common/scheduler/filters/json_filter.py | 150 + cinder/openstack/common/scheduler/weight.py | 91 + .../common/scheduler/weights/__init__.py | 45 + cinder/openstack/common/service.py | 332 + cinder/openstack/common/strutils.py | 150 + cinder/openstack/common/threadgroup.py | 114 + cinder/openstack/common/timeutils.py | 186 + cinder/openstack/common/uuidutils.py | 39 + cinder/policy.py | 105 + cinder/quota.py | 813 +++ cinder/scheduler/__init__.py | 27 + cinder/scheduler/chance.py | 86 + cinder/scheduler/driver.py | 109 + cinder/scheduler/filter_scheduler.py | 354 + cinder/scheduler/filters/__init__.py | 14 + cinder/scheduler/filters/capacity_filter.py | 57 + cinder/scheduler/filters/retry_filter.py | 45 + cinder/scheduler/host_manager.py | 337 + cinder/scheduler/manager.py | 178 + cinder/scheduler/rpcapi.py | 79 + cinder/scheduler/scheduler_options.py | 105 + cinder/scheduler/simple.py | 137 + cinder/scheduler/weights/__init__.py | 14 + cinder/scheduler/weights/capacity.py | 56 + cinder/service.py | 622 ++ cinder/share/__init__.py | 25 + cinder/share/api.py | 325 + cinder/share/configuration.py 
| 84 + cinder/share/driver.py | 178 + cinder/share/drivers/__init__.py | 22 + cinder/share/drivers/lvm.py | 609 ++ cinder/share/drivers/netapp.py | 745 ++ cinder/share/manager.py | 221 + cinder/share/rpcapi.py | 93 + cinder/test.py | 285 + cinder/testing/README.rst | 66 + cinder/tests/__init__.py | 85 + cinder/tests/api/__init__.py | 19 + cinder/tests/api/common.py | 38 + cinder/tests/api/contrib/__init__.py | 19 + cinder/tests/api/contrib/stubs.py | 125 + .../tests/api/contrib/test_admin_actions.py | 348 + cinder/tests/api/contrib/test_backups.py | 860 +++ .../test_extended_snapshot_attributes.py | 124 + cinder/tests/api/contrib/test_hosts.py | 202 + cinder/tests/api/contrib/test_services.py | 216 + .../tests/api/contrib/test_share_actions.py | 116 + .../tests/api/contrib/test_share_snapshots.py | 190 + cinder/tests/api/contrib/test_shares.py | 245 + .../api/contrib/test_types_extra_specs.py | 232 + cinder/tests/api/contrib/test_types_manage.py | 129 + .../tests/api/contrib/test_volume_actions.py | 248 + .../api/contrib/test_volume_host_attribute.py | 134 + .../api/contrib/test_volume_image_metadata.py | 130 + .../contrib/test_volume_tenant_attribute.py | 137 + cinder/tests/api/extensions/__init__.py | 15 + cinder/tests/api/extensions/foxinsocks.py | 93 + cinder/tests/api/fakes.py | 190 + cinder/tests/api/middleware/__init__.py | 0 cinder/tests/api/middleware/test_auth.py | 59 + cinder/tests/api/middleware/test_faults.py | 208 + cinder/tests/api/middleware/test_sizelimit.py | 100 + cinder/tests/api/openstack/__init__.py | 19 + cinder/tests/api/openstack/test_wsgi.py | 858 +++ cinder/tests/api/test_common.py | 243 + cinder/tests/api/test_extensions.py | 154 + cinder/tests/api/test_router.py | 158 + cinder/tests/api/test_wsgi.py | 67 + cinder/tests/api/test_xmlutil.py | 697 ++ cinder/tests/api/v1/__init__.py | 0 cinder/tests/api/v1/stubs.py | 131 + cinder/tests/api/v1/test_limits.py | 895 +++ cinder/tests/api/v1/test_snapshot_metadata.py | 458 ++ 
cinder/tests/api/v1/test_snapshots.py | 417 ++ cinder/tests/api/v1/test_types.py | 194 + cinder/tests/api/v1/test_volume_metadata.py | 441 ++ cinder/tests/api/v1/test_volumes.py | 764 +++ cinder/tests/api/v2/__init__.py | 0 cinder/tests/api/v2/stubs.py | 133 + cinder/tests/api/v2/test_limits.py | 890 +++ cinder/tests/api/v2/test_snapshot_metadata.py | 458 ++ cinder/tests/api/v2/test_snapshots.py | 425 ++ cinder/tests/api/v2/test_types.py | 211 + cinder/tests/api/v2/test_volumes.py | 955 +++ cinder/tests/backup/__init__.py | 14 + cinder/tests/backup/fake_service.py | 41 + cinder/tests/backup/fake_swift_client.py | 111 + cinder/tests/brick/__init__.py | 16 + cinder/tests/brick/test_brick_lvm.py | 144 + cinder/tests/db/__init__.py | 20 + cinder/tests/db/fakes.py | 46 + cinder/tests/declare_flags.py | 24 + cinder/tests/fake_driver.py | 118 + cinder/tests/fake_flags.py | 46 + cinder/tests/fake_utils.py | 112 + cinder/tests/glance/__init__.py | 20 + cinder/tests/glance/stubs.py | 112 + cinder/tests/image/__init__.py | 20 + cinder/tests/image/fake.py | 243 + cinder/tests/image/test_glance.py | 590 ++ cinder/tests/integrated/__init__.py | 22 + cinder/tests/integrated/api/__init__.py | 20 + cinder/tests/integrated/api/client.py | 219 + cinder/tests/integrated/integrated_helpers.py | 130 + cinder/tests/integrated/test_extensions.py | 40 + cinder/tests/integrated/test_login.py | 31 + cinder/tests/integrated/test_volumes.py | 198 + cinder/tests/integrated/test_xml.py | 51 + cinder/tests/monkey_patch_example/__init__.py | 33 + .../tests/monkey_patch_example/example_a.py | 29 + .../tests/monkey_patch_example/example_b.py | 30 + cinder/tests/policy.json | 42 + cinder/tests/runtime_flags.py | 24 + cinder/tests/scheduler/__init__.py | 19 + cinder/tests/scheduler/fakes.py | 104 + .../tests/scheduler/test_capacity_weigher.py | 94 + .../tests/scheduler/test_filter_scheduler.py | 302 + cinder/tests/scheduler/test_host_filters.py | 159 + cinder/tests/scheduler/test_host_manager.py | 299 
+ cinder/tests/scheduler/test_rpcapi.py | 91 + cinder/tests/scheduler/test_scheduler.py | 394 ++ .../tests/scheduler/test_scheduler_options.py | 138 + cinder/tests/test_HpSanISCSIDriver.py | 245 + cinder/tests/test_api.py | 75 + cinder/tests/test_backup.py | 395 ++ cinder/tests/test_backup_swift.py | 208 + cinder/tests/test_context.py | 72 + cinder/tests/test_coraid.py | 268 + cinder/tests/test_drivers_compatibility.py | 184 + cinder/tests/test_emc.py | 766 +++ cinder/tests/test_exception.py | 94 + cinder/tests/test_flags.py | 83 + cinder/tests/test_glusterfs.py | 579 ++ cinder/tests/test_hp3par.py | 1044 +++ cinder/tests/test_huawei.py | 859 +++ cinder/tests/test_iscsi.py | 190 + cinder/tests/test_migrations.conf | 9 + cinder/tests/test_migrations.py | 629 ++ cinder/tests/test_misc.py | 61 + cinder/tests/test_netapp.py | 2361 +++++++ cinder/tests/test_netapp_nfs.py | 695 ++ cinder/tests/test_nexenta.py | 302 + cinder/tests/test_nfs.py | 654 ++ cinder/tests/test_policy.py | 232 + cinder/tests/test_quota.py | 1369 ++++ cinder/tests/test_rbd.py | 266 + cinder/tests/test_scality.py | 185 + cinder/tests/test_service.py | 224 + cinder/tests/test_share.py | 368 + cinder/tests/test_share_api.py | 513 ++ cinder/tests/test_share_driver.py | 49 + cinder/tests/test_share_lvm.py | 754 +++ cinder/tests/test_share_netapp.py | 690 ++ cinder/tests/test_share_rpcapi.py | 150 + cinder/tests/test_sheepdog.py | 64 + cinder/tests/test_skip_examples.py | 47 + cinder/tests/test_solidfire.py | 283 + cinder/tests/test_storwize_svc.py | 1978 ++++++ cinder/tests/test_test.py | 46 + cinder/tests/test_test_utils.py | 28 + cinder/tests/test_utils.py | 728 ++ cinder/tests/test_volume.py | 1331 ++++ cinder/tests/test_volume_configuration.py | 72 + cinder/tests/test_volume_glance_metadata.py | 131 + cinder/tests/test_volume_rpcapi.py | 172 + cinder/tests/test_volume_types.py | 187 + cinder/tests/test_volume_types_extra_specs.py | 130 + cinder/tests/test_volume_utils.py | 117 + 
cinder/tests/test_windows.py | 220 + cinder/tests/test_wsgi.py | 264 + cinder/tests/test_xenapi_sm.py | 509 ++ cinder/tests/test_xiv.py | 245 + cinder/tests/test_zadara.py | 581 ++ cinder/tests/utils.py | 32 + cinder/tests/var/ca.crt | 35 + cinder/tests/var/certificate.crt | 30 + cinder/tests/var/privatekey.key | 51 + cinder/tests/windows/__init__.py | 0 cinder/tests/windows/basetestcase.py | 96 + cinder/tests/windows/db_fakes.py | 36 + cinder/tests/windows/mockproxy.py | 238 + cinder/tests/windows/stubs/README.rst | 2 + ...river.test_check_for_setup_errors_wmi.p.gz | Bin 0 -> 473 bytes ...stWindowsDriver.test_create_export_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_create_export_wmi.p.gz | Bin 0 -> 1455 bytes ...WindowsDriver.test_create_snapshot_os.p.gz | Bin 0 -> 441 bytes ...indowsDriver.test_create_snapshot_wmi.p.gz | Bin 0 -> 1476 bytes ...r.test_create_volume_from_snapshot_os.p.gz | Bin 0 -> 500 bytes ....test_create_volume_from_snapshot_wmi.p.gz | Bin 0 -> 1840 bytes ...stWindowsDriver.test_create_volume_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_create_volume_wmi.p.gz | Bin 0 -> 1057 bytes ...WindowsDriver.test_delete_snapshot_os.p.gz | Bin 0 -> 441 bytes ...indowsDriver.test_delete_snapshot_wmi.p.gz | Bin 0 -> 1504 bytes ...stWindowsDriver.test_delete_volume_os.p.gz | Bin 0 -> 472 bytes ...tWindowsDriver.test_delete_volume_wmi.p.gz | Bin 0 -> 1040 bytes ...stWindowsDriver.test_ensure_export_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_ensure_export_wmi.p.gz | Bin 0 -> 1453 bytes ...sDriver.test_initialize_connection_os.p.gz | Bin 0 -> 447 bytes ...Driver.test_initialize_connection_wmi.p.gz | Bin 0 -> 1982 bytes ...stWindowsDriver.test_remove_export_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_remove_export_wmi.p.gz | Bin 0 -> 1462 bytes cinder/tests/windows/windowsutils.py | 145 + cinder/tests/xenapi/__init__.py | 0 cinder/utils.py | 1216 ++++ cinder/version.py | 25 + cinder/volume/__init__.py | 25 + cinder/volume/api.py | 
765 +++ cinder/volume/configuration.py | 83 + cinder/volume/driver.py | 551 ++ cinder/volume/drivers/__init__.py | 22 + cinder/volume/drivers/coraid.py | 424 ++ cinder/volume/drivers/emc/__init__.py | 0 .../drivers/emc/cinder_emc_config.xml.sample | 12 + cinder/volume/drivers/emc/emc_smis_common.py | 1564 +++++ cinder/volume/drivers/emc/emc_smis_iscsi.py | 246 + cinder/volume/drivers/glusterfs.py | 283 + cinder/volume/drivers/huawei/__init__.py | 15 + .../huawei/cinder_huawei_conf.xml.sample | 34 + cinder/volume/drivers/huawei/huawei_iscsi.py | 1547 +++++ cinder/volume/drivers/lvm.py | 688 ++ cinder/volume/drivers/netapp/__init__.py | 0 cinder/volume/drivers/netapp/api.py | 410 ++ cinder/volume/drivers/netapp/iscsi.py | 2528 +++++++ cinder/volume/drivers/netapp/nfs.py | 624 ++ cinder/volume/drivers/nexenta/__init__.py | 33 + cinder/volume/drivers/nexenta/jsonrpc.py | 84 + cinder/volume/drivers/nexenta/volume.py | 353 + cinder/volume/drivers/nfs.py | 357 + cinder/volume/drivers/rbd.py | 306 + cinder/volume/drivers/san/__init__.py | 27 + cinder/volume/drivers/san/hp/__init__.py | 0 .../volume/drivers/san/hp/hp_3par_common.py | 742 ++ cinder/volume/drivers/san/hp/hp_3par_fc.py | 259 + cinder/volume/drivers/san/hp/hp_3par_iscsi.py | 279 + cinder/volume/drivers/san/hp_lefthand.py | 314 + cinder/volume/drivers/san/san.py | 177 + cinder/volume/drivers/san/solaris.py | 285 + cinder/volume/drivers/scality.py | 261 + cinder/volume/drivers/sheepdog.py | 141 + cinder/volume/drivers/solidfire.py | 590 ++ cinder/volume/drivers/storwize_svc.py | 1627 +++++ cinder/volume/drivers/windows.py | 246 + cinder/volume/drivers/xenapi/__init__.py | 13 + cinder/volume/drivers/xenapi/lib.py | 542 ++ cinder/volume/drivers/xenapi/sm.py | 272 + cinder/volume/drivers/xenapi/tools.py | 7 + cinder/volume/drivers/xiv.py | 122 + cinder/volume/drivers/zadara.py | 491 ++ cinder/volume/manager.py | 725 ++ cinder/volume/rpcapi.py | 130 + cinder/volume/utils.py | 131 + cinder/volume/volume_types.py | 158 
+ cinder/wsgi.py | 493 ++ contrib/redhat-eventlet.patch | 16 + doc/.gitignore | 3 + doc/Makefile | 97 + doc/README.rst | 55 + doc/ext/__init__.py | 0 doc/ext/cinder_autodoc.py | 12 + doc/ext/cinder_todo.py | 104 + doc/find_autodoc_modules.sh | 20 + doc/generate_autodoc_index.sh | 46 + doc/source/_ga/layout.html | 17 + doc/source/_static/.gitignore | 0 doc/source/_static/.placeholder | 0 doc/source/_static/basic.css | 416 ++ doc/source/_static/default.css | 230 + doc/source/_static/jquery.tweet.js | 154 + doc/source/_static/tweaks.css | 218 + doc/source/_templates/.gitignore | 0 doc/source/_templates/.placeholder | 0 doc/source/_theme/layout.html | 95 + doc/source/_theme/theme.conf | 5 + doc/source/conf.py | 244 + doc/source/devref/addmethod.openstackapi.rst | 56 + doc/source/devref/api.rst | 167 + doc/source/devref/architecture.rst | 53 + doc/source/devref/auth.rst | 257 + doc/source/devref/cinder.rst | 215 + doc/source/devref/database.rst | 63 + doc/source/devref/development.environment.rst | 152 + doc/source/devref/fakes.rst | 85 + doc/source/devref/gerrit.rst | 16 + doc/source/devref/il8n.rst | 33 + doc/source/devref/index.rst | 81 + doc/source/devref/jenkins.rst | 37 + doc/source/devref/launchpad.rst | 54 + doc/source/devref/rpc.rst | 151 + doc/source/devref/scheduler.rst | 61 + doc/source/devref/services.rst | 55 + doc/source/devref/threading.rst | 51 + doc/source/devref/unit_tests.rst | 159 + doc/source/devref/volume.rst | 64 + doc/source/images/rpc/arch.png | Bin 0 -> 26690 bytes doc/source/images/rpc/arch.svg | 292 + doc/source/images/rpc/flow1.png | Bin 0 -> 40982 bytes doc/source/images/rpc/flow1.svg | 617 ++ doc/source/images/rpc/flow2.png | Bin 0 -> 30650 bytes doc/source/images/rpc/flow2.svg | 423 ++ doc/source/images/rpc/rabt.png | Bin 0 -> 44964 bytes doc/source/images/rpc/rabt.svg | 581 ++ doc/source/images/rpc/state.png | Bin 0 -> 38543 bytes doc/source/index.rst | 62 + doc/source/man/cinder-manage.rst | 281 + etc/cinder/api-paste.ini | 62 + 
etc/cinder/cinder.conf.sample | 1341 ++++ etc/cinder/logging_sample.conf | 76 + etc/cinder/policy.json | 34 + etc/cinder/rootwrap.conf | 27 + etc/cinder/rootwrap.d/share.filters | 36 + etc/cinder/rootwrap.d/volume.filters | 59 + openstack-common.conf | 32 + pylintrc | 38 + run_tests.sh | 182 + setup.cfg | 81 + setup.py | 21 + tools/conf/extract_opts.py | 195 + tools/conf/generate_sample.sh | 24 + tools/enable-pre-commit-hook.sh | 42 + tools/install_venv.py | 72 + tools/install_venv_common.py | 220 + tools/lintstack.py | 199 + tools/lintstack.sh | 59 + tools/patch_tox_venv.py | 38 + tools/pip-requires | 26 + tools/test-requires | 18 + tools/with_venv.sh | 4 + tox.ini | 42 + 532 files changed, 240158 insertions(+) create mode 100644 CONTRIBUTING.md create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100755 bin/cinder-all create mode 100755 bin/cinder-api create mode 100755 bin/cinder-backup create mode 100755 bin/cinder-clear-rabbit-queues create mode 100755 bin/cinder-manage create mode 100755 bin/cinder-rootwrap create mode 100755 bin/cinder-rpc-zmq-receiver create mode 100755 bin/cinder-scheduler create mode 100755 bin/cinder-share create mode 100755 bin/cinder-volume create mode 100755 bin/cinder-volume-usage-audit create mode 100644 cinder/__init__.py create mode 100644 cinder/api/__init__.py create mode 100644 cinder/api/auth.py create mode 100644 cinder/api/common.py create mode 100644 cinder/api/contrib/__init__.py create mode 100644 cinder/api/contrib/admin_actions.py create mode 100644 cinder/api/contrib/backups.py create mode 100644 cinder/api/contrib/extended_snapshot_attributes.py create mode 100644 cinder/api/contrib/hosts.py create mode 100644 cinder/api/contrib/image_create.py create mode 100644 cinder/api/contrib/quota_classes.py create mode 100644 cinder/api/contrib/quotas.py create mode 100644 cinder/api/contrib/services.py create mode 100644 
cinder/api/contrib/share_actions.py create mode 100644 cinder/api/contrib/share_snapshots.py create mode 100644 cinder/api/contrib/shares.py create mode 100644 cinder/api/contrib/types_extra_specs.py create mode 100644 cinder/api/contrib/types_manage.py create mode 100644 cinder/api/contrib/volume_actions.py create mode 100644 cinder/api/contrib/volume_host_attribute.py create mode 100644 cinder/api/contrib/volume_image_metadata.py create mode 100644 cinder/api/contrib/volume_tenant_attribute.py create mode 100644 cinder/api/extensions.py create mode 100644 cinder/api/middleware/__init__.py create mode 100644 cinder/api/middleware/auth.py create mode 100644 cinder/api/middleware/fault.py create mode 100644 cinder/api/middleware/sizelimit.py create mode 100644 cinder/api/openstack/__init__.py create mode 100644 cinder/api/openstack/urlmap.py create mode 100644 cinder/api/openstack/volume/__init__.py create mode 100644 cinder/api/openstack/volume/versions.py create mode 100644 cinder/api/openstack/wsgi.py create mode 100644 cinder/api/schemas/atom-link.rng create mode 100644 cinder/api/schemas/v1.1/extension.rng create mode 100644 cinder/api/schemas/v1.1/extensions.rng create mode 100644 cinder/api/schemas/v1.1/limits.rng create mode 100644 cinder/api/schemas/v1.1/metadata.rng create mode 100644 cinder/api/sizelimit.py create mode 100644 cinder/api/urlmap.py create mode 100644 cinder/api/v1/__init__.py create mode 100644 cinder/api/v1/limits.py create mode 100644 cinder/api/v1/router.py create mode 100644 cinder/api/v1/snapshot_metadata.py create mode 100644 cinder/api/v1/snapshots.py create mode 100644 cinder/api/v1/types.py create mode 100644 cinder/api/v1/volume_metadata.py create mode 100644 cinder/api/v1/volumes.py create mode 100644 cinder/api/v2/__init__.py create mode 100644 cinder/api/v2/limits.py create mode 100644 cinder/api/v2/router.py create mode 100644 cinder/api/v2/snapshot_metadata.py create mode 100644 cinder/api/v2/snapshots.py create mode 100644 
cinder/api/v2/types.py create mode 100644 cinder/api/v2/views/__init__.py create mode 100644 cinder/api/v2/views/volumes.py create mode 100644 cinder/api/v2/volumes.py create mode 100644 cinder/api/versions.py create mode 100644 cinder/api/views/__init__.py create mode 100644 cinder/api/views/backups.py create mode 100644 cinder/api/views/limits.py create mode 100644 cinder/api/views/share_snapshots.py create mode 100644 cinder/api/views/shares.py create mode 100644 cinder/api/views/types.py create mode 100644 cinder/api/views/versions.py create mode 100644 cinder/api/xmlutil.py create mode 100644 cinder/backup/__init__.py create mode 100644 cinder/backup/api.py create mode 100755 cinder/backup/manager.py create mode 100644 cinder/backup/rpcapi.py create mode 100644 cinder/backup/services/__init__.py create mode 100644 cinder/backup/services/swift.py create mode 100644 cinder/brick/__init__.py create mode 100644 cinder/brick/iscsi/__init__.py create mode 100644 cinder/brick/iscsi/iscsi.py create mode 100644 cinder/brick/local_dev/__init__.py create mode 100644 cinder/brick/local_dev/lvm.py create mode 100644 cinder/common/__init__.py create mode 100755 cinder/common/sqlalchemyutils.py create mode 100644 cinder/compute/__init__.py create mode 100644 cinder/compute/aggregate_states.py create mode 100644 cinder/context.py create mode 100644 cinder/db/__init__.py create mode 100644 cinder/db/api.py create mode 100644 cinder/db/base.py create mode 100644 cinder/db/migration.py create mode 100644 cinder/db/sqlalchemy/__init__.py create mode 100644 cinder/db/sqlalchemy/api.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/README create mode 100644 cinder/db/sqlalchemy/migrate_repo/__init__.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/manage.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/migrate.cfg create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/__init__.py create mode 100644 cinder/db/sqlalchemy/migration.py create mode 100644 cinder/db/sqlalchemy/models.py create mode 100644 cinder/db/sqlalchemy/session.py create mode 100644 cinder/exception.py create mode 100644 cinder/flags.py create mode 100644 cinder/image/__init__.py create mode 100644 cinder/image/glance.py create mode 100644 cinder/image/image_utils.py create mode 100644 cinder/locale/bg_BG/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/bs/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/cinder.pot create mode 100644 cinder/locale/cs/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/da/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/de/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/en_AU/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/en_GB/LC_MESSAGES/cinder.po create mode 100644 
cinder/locale/en_US/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/es/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/fi_FI/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/fr/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/it/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ja/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ko/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ko_KR/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/pt_BR/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ru/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/tl/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/tr/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/uk/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/vi_VN/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/zh_CN/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/zh_TW/LC_MESSAGES/cinder.po create mode 100644 cinder/manager.py create mode 100644 cinder/openstack/__init__.py create mode 100644 cinder/openstack/common/README create mode 100644 cinder/openstack/common/__init__.py create mode 100644 cinder/openstack/common/context.py create mode 100644 cinder/openstack/common/eventlet_backdoor.py create mode 100644 cinder/openstack/common/exception.py create mode 100644 cinder/openstack/common/excutils.py create mode 100644 cinder/openstack/common/fileutils.py create mode 100644 cinder/openstack/common/gettextutils.py create mode 100644 cinder/openstack/common/importutils.py create mode 100644 cinder/openstack/common/jsonutils.py create mode 100644 cinder/openstack/common/local.py create mode 100644 cinder/openstack/common/lockutils.py create mode 100644 cinder/openstack/common/log.py create mode 100644 cinder/openstack/common/loopingcall.py create mode 100644 cinder/openstack/common/network_utils.py create mode 100644 cinder/openstack/common/notifier/__init__.py create mode 100644 cinder/openstack/common/notifier/api.py create mode 100644 
cinder/openstack/common/notifier/log_notifier.py create mode 100644 cinder/openstack/common/notifier/no_op_notifier.py create mode 100644 cinder/openstack/common/notifier/rabbit_notifier.py create mode 100644 cinder/openstack/common/notifier/rpc_notifier.py create mode 100644 cinder/openstack/common/notifier/rpc_notifier2.py create mode 100644 cinder/openstack/common/notifier/test_notifier.py create mode 100644 cinder/openstack/common/policy.py create mode 100644 cinder/openstack/common/processutils.py create mode 100644 cinder/openstack/common/rootwrap/__init__.py create mode 100755 cinder/openstack/common/rootwrap/cmd.py create mode 100644 cinder/openstack/common/rootwrap/filters.py create mode 100644 cinder/openstack/common/rootwrap/wrapper.py create mode 100644 cinder/openstack/common/rpc/__init__.py create mode 100644 cinder/openstack/common/rpc/amqp.py create mode 100644 cinder/openstack/common/rpc/common.py create mode 100644 cinder/openstack/common/rpc/dispatcher.py create mode 100644 cinder/openstack/common/rpc/impl_fake.py create mode 100644 cinder/openstack/common/rpc/impl_kombu.py create mode 100644 cinder/openstack/common/rpc/impl_qpid.py create mode 100644 cinder/openstack/common/rpc/impl_zmq.py create mode 100644 cinder/openstack/common/rpc/matchmaker.py create mode 100644 cinder/openstack/common/rpc/matchmaker_redis.py create mode 100644 cinder/openstack/common/rpc/proxy.py create mode 100644 cinder/openstack/common/rpc/service.py create mode 100755 cinder/openstack/common/rpc/zmq_receiver.py create mode 100644 cinder/openstack/common/scheduler/__init__.py create mode 100644 cinder/openstack/common/scheduler/filter.py create mode 100644 cinder/openstack/common/scheduler/filters/__init__.py create mode 100644 cinder/openstack/common/scheduler/filters/availability_zone_filter.py create mode 100644 cinder/openstack/common/scheduler/filters/capabilities_filter.py create mode 100644 cinder/openstack/common/scheduler/filters/extra_specs_ops.py create mode 
100644 cinder/openstack/common/scheduler/filters/json_filter.py create mode 100644 cinder/openstack/common/scheduler/weight.py create mode 100644 cinder/openstack/common/scheduler/weights/__init__.py create mode 100644 cinder/openstack/common/service.py create mode 100644 cinder/openstack/common/strutils.py create mode 100644 cinder/openstack/common/threadgroup.py create mode 100644 cinder/openstack/common/timeutils.py create mode 100644 cinder/openstack/common/uuidutils.py create mode 100644 cinder/policy.py create mode 100644 cinder/quota.py create mode 100644 cinder/scheduler/__init__.py create mode 100644 cinder/scheduler/chance.py create mode 100644 cinder/scheduler/driver.py create mode 100644 cinder/scheduler/filter_scheduler.py create mode 100644 cinder/scheduler/filters/__init__.py create mode 100644 cinder/scheduler/filters/capacity_filter.py create mode 100644 cinder/scheduler/filters/retry_filter.py create mode 100644 cinder/scheduler/host_manager.py create mode 100644 cinder/scheduler/manager.py create mode 100644 cinder/scheduler/rpcapi.py create mode 100644 cinder/scheduler/scheduler_options.py create mode 100644 cinder/scheduler/simple.py create mode 100644 cinder/scheduler/weights/__init__.py create mode 100644 cinder/scheduler/weights/capacity.py create mode 100644 cinder/service.py create mode 100644 cinder/share/__init__.py create mode 100644 cinder/share/api.py create mode 100644 cinder/share/configuration.py create mode 100644 cinder/share/driver.py create mode 100644 cinder/share/drivers/__init__.py create mode 100644 cinder/share/drivers/lvm.py create mode 100644 cinder/share/drivers/netapp.py create mode 100644 cinder/share/manager.py create mode 100644 cinder/share/rpcapi.py create mode 100644 cinder/test.py create mode 100644 cinder/testing/README.rst create mode 100644 cinder/tests/__init__.py create mode 100644 cinder/tests/api/__init__.py create mode 100644 cinder/tests/api/common.py create mode 100644 
cinder/tests/api/contrib/__init__.py create mode 100644 cinder/tests/api/contrib/stubs.py create mode 100644 cinder/tests/api/contrib/test_admin_actions.py create mode 100644 cinder/tests/api/contrib/test_backups.py create mode 100644 cinder/tests/api/contrib/test_extended_snapshot_attributes.py create mode 100644 cinder/tests/api/contrib/test_hosts.py create mode 100644 cinder/tests/api/contrib/test_services.py create mode 100644 cinder/tests/api/contrib/test_share_actions.py create mode 100644 cinder/tests/api/contrib/test_share_snapshots.py create mode 100644 cinder/tests/api/contrib/test_shares.py create mode 100644 cinder/tests/api/contrib/test_types_extra_specs.py create mode 100644 cinder/tests/api/contrib/test_types_manage.py create mode 100644 cinder/tests/api/contrib/test_volume_actions.py create mode 100644 cinder/tests/api/contrib/test_volume_host_attribute.py create mode 100644 cinder/tests/api/contrib/test_volume_image_metadata.py create mode 100644 cinder/tests/api/contrib/test_volume_tenant_attribute.py create mode 100644 cinder/tests/api/extensions/__init__.py create mode 100644 cinder/tests/api/extensions/foxinsocks.py create mode 100644 cinder/tests/api/fakes.py create mode 100644 cinder/tests/api/middleware/__init__.py create mode 100644 cinder/tests/api/middleware/test_auth.py create mode 100644 cinder/tests/api/middleware/test_faults.py create mode 100644 cinder/tests/api/middleware/test_sizelimit.py create mode 100644 cinder/tests/api/openstack/__init__.py create mode 100644 cinder/tests/api/openstack/test_wsgi.py create mode 100644 cinder/tests/api/test_common.py create mode 100644 cinder/tests/api/test_extensions.py create mode 100644 cinder/tests/api/test_router.py create mode 100644 cinder/tests/api/test_wsgi.py create mode 100644 cinder/tests/api/test_xmlutil.py create mode 100644 cinder/tests/api/v1/__init__.py create mode 100644 cinder/tests/api/v1/stubs.py create mode 100644 cinder/tests/api/v1/test_limits.py create mode 100644 
cinder/tests/api/v1/test_snapshot_metadata.py create mode 100644 cinder/tests/api/v1/test_snapshots.py create mode 100644 cinder/tests/api/v1/test_types.py create mode 100644 cinder/tests/api/v1/test_volume_metadata.py create mode 100644 cinder/tests/api/v1/test_volumes.py create mode 100644 cinder/tests/api/v2/__init__.py create mode 100644 cinder/tests/api/v2/stubs.py create mode 100644 cinder/tests/api/v2/test_limits.py create mode 100644 cinder/tests/api/v2/test_snapshot_metadata.py create mode 100644 cinder/tests/api/v2/test_snapshots.py create mode 100644 cinder/tests/api/v2/test_types.py create mode 100644 cinder/tests/api/v2/test_volumes.py create mode 100644 cinder/tests/backup/__init__.py create mode 100644 cinder/tests/backup/fake_service.py create mode 100644 cinder/tests/backup/fake_swift_client.py create mode 100644 cinder/tests/brick/__init__.py create mode 100644 cinder/tests/brick/test_brick_lvm.py create mode 100644 cinder/tests/db/__init__.py create mode 100644 cinder/tests/db/fakes.py create mode 100644 cinder/tests/declare_flags.py create mode 100644 cinder/tests/fake_driver.py create mode 100644 cinder/tests/fake_flags.py create mode 100644 cinder/tests/fake_utils.py create mode 100644 cinder/tests/glance/__init__.py create mode 100644 cinder/tests/glance/stubs.py create mode 100644 cinder/tests/image/__init__.py create mode 100644 cinder/tests/image/fake.py create mode 100644 cinder/tests/image/test_glance.py create mode 100644 cinder/tests/integrated/__init__.py create mode 100644 cinder/tests/integrated/api/__init__.py create mode 100644 cinder/tests/integrated/api/client.py create mode 100644 cinder/tests/integrated/integrated_helpers.py create mode 100644 cinder/tests/integrated/test_extensions.py create mode 100644 cinder/tests/integrated/test_login.py create mode 100755 cinder/tests/integrated/test_volumes.py create mode 100644 cinder/tests/integrated/test_xml.py create mode 100644 cinder/tests/monkey_patch_example/__init__.py create 
mode 100644 cinder/tests/monkey_patch_example/example_a.py create mode 100644 cinder/tests/monkey_patch_example/example_b.py create mode 100644 cinder/tests/policy.json create mode 100644 cinder/tests/runtime_flags.py create mode 100644 cinder/tests/scheduler/__init__.py create mode 100644 cinder/tests/scheduler/fakes.py create mode 100644 cinder/tests/scheduler/test_capacity_weigher.py create mode 100644 cinder/tests/scheduler/test_filter_scheduler.py create mode 100644 cinder/tests/scheduler/test_host_filters.py create mode 100644 cinder/tests/scheduler/test_host_manager.py create mode 100644 cinder/tests/scheduler/test_rpcapi.py create mode 100644 cinder/tests/scheduler/test_scheduler.py create mode 100644 cinder/tests/scheduler/test_scheduler_options.py create mode 100644 cinder/tests/test_HpSanISCSIDriver.py create mode 100644 cinder/tests/test_api.py create mode 100644 cinder/tests/test_backup.py create mode 100644 cinder/tests/test_backup_swift.py create mode 100644 cinder/tests/test_context.py create mode 100644 cinder/tests/test_coraid.py create mode 100644 cinder/tests/test_drivers_compatibility.py create mode 100644 cinder/tests/test_emc.py create mode 100644 cinder/tests/test_exception.py create mode 100644 cinder/tests/test_flags.py create mode 100644 cinder/tests/test_glusterfs.py create mode 100644 cinder/tests/test_hp3par.py create mode 100644 cinder/tests/test_huawei.py create mode 100644 cinder/tests/test_iscsi.py create mode 100644 cinder/tests/test_migrations.conf create mode 100644 cinder/tests/test_migrations.py create mode 100644 cinder/tests/test_misc.py create mode 100644 cinder/tests/test_netapp.py create mode 100644 cinder/tests/test_netapp_nfs.py create mode 100644 cinder/tests/test_nexenta.py create mode 100644 cinder/tests/test_nfs.py create mode 100644 cinder/tests/test_policy.py create mode 100644 cinder/tests/test_quota.py create mode 100644 cinder/tests/test_rbd.py create mode 100644 cinder/tests/test_scality.py create mode 100644 
cinder/tests/test_service.py create mode 100644 cinder/tests/test_share.py create mode 100644 cinder/tests/test_share_api.py create mode 100644 cinder/tests/test_share_driver.py create mode 100644 cinder/tests/test_share_lvm.py create mode 100644 cinder/tests/test_share_netapp.py create mode 100644 cinder/tests/test_share_rpcapi.py create mode 100644 cinder/tests/test_sheepdog.py create mode 100644 cinder/tests/test_skip_examples.py create mode 100644 cinder/tests/test_solidfire.py create mode 100755 cinder/tests/test_storwize_svc.py create mode 100644 cinder/tests/test_test.py create mode 100644 cinder/tests/test_test_utils.py create mode 100644 cinder/tests/test_utils.py create mode 100644 cinder/tests/test_volume.py create mode 100644 cinder/tests/test_volume_configuration.py create mode 100644 cinder/tests/test_volume_glance_metadata.py create mode 100644 cinder/tests/test_volume_rpcapi.py create mode 100644 cinder/tests/test_volume_types.py create mode 100644 cinder/tests/test_volume_types_extra_specs.py create mode 100644 cinder/tests/test_volume_utils.py create mode 100644 cinder/tests/test_windows.py create mode 100644 cinder/tests/test_wsgi.py create mode 100644 cinder/tests/test_xenapi_sm.py create mode 100644 cinder/tests/test_xiv.py create mode 100644 cinder/tests/test_zadara.py create mode 100644 cinder/tests/utils.py create mode 100644 cinder/tests/var/ca.crt create mode 100644 cinder/tests/var/certificate.crt create mode 100644 cinder/tests/var/privatekey.key create mode 100644 cinder/tests/windows/__init__.py create mode 100644 cinder/tests/windows/basetestcase.py create mode 100644 cinder/tests/windows/db_fakes.py create mode 100644 cinder/tests/windows/mockproxy.py create mode 100644 cinder/tests/windows/stubs/README.rst create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_check_for_setup_errors_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_export_os.p.gz create mode 
100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_export_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_snapshot_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_snapshot_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_snapshot_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_snapshot_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_volume_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_volume_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_export_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_export_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_wmi.p.gz create mode 100644 cinder/tests/windows/windowsutils.py create mode 100644 cinder/tests/xenapi/__init__.py create mode 100644 cinder/utils.py create mode 100644 cinder/version.py 
create mode 100644 cinder/volume/__init__.py create mode 100644 cinder/volume/api.py create mode 100644 cinder/volume/configuration.py create mode 100644 cinder/volume/driver.py create mode 100644 cinder/volume/drivers/__init__.py create mode 100644 cinder/volume/drivers/coraid.py create mode 100644 cinder/volume/drivers/emc/__init__.py create mode 100644 cinder/volume/drivers/emc/cinder_emc_config.xml.sample create mode 100644 cinder/volume/drivers/emc/emc_smis_common.py create mode 100644 cinder/volume/drivers/emc/emc_smis_iscsi.py create mode 100644 cinder/volume/drivers/glusterfs.py create mode 100644 cinder/volume/drivers/huawei/__init__.py create mode 100644 cinder/volume/drivers/huawei/cinder_huawei_conf.xml.sample create mode 100644 cinder/volume/drivers/huawei/huawei_iscsi.py create mode 100644 cinder/volume/drivers/lvm.py create mode 100644 cinder/volume/drivers/netapp/__init__.py create mode 100644 cinder/volume/drivers/netapp/api.py create mode 100644 cinder/volume/drivers/netapp/iscsi.py create mode 100644 cinder/volume/drivers/netapp/nfs.py create mode 100644 cinder/volume/drivers/nexenta/__init__.py create mode 100644 cinder/volume/drivers/nexenta/jsonrpc.py create mode 100644 cinder/volume/drivers/nexenta/volume.py create mode 100755 cinder/volume/drivers/nfs.py create mode 100644 cinder/volume/drivers/rbd.py create mode 100644 cinder/volume/drivers/san/__init__.py create mode 100644 cinder/volume/drivers/san/hp/__init__.py create mode 100644 cinder/volume/drivers/san/hp/hp_3par_common.py create mode 100644 cinder/volume/drivers/san/hp/hp_3par_fc.py create mode 100644 cinder/volume/drivers/san/hp/hp_3par_iscsi.py create mode 100644 cinder/volume/drivers/san/hp_lefthand.py create mode 100644 cinder/volume/drivers/san/san.py create mode 100644 cinder/volume/drivers/san/solaris.py create mode 100644 cinder/volume/drivers/scality.py create mode 100644 cinder/volume/drivers/sheepdog.py create mode 100644 cinder/volume/drivers/solidfire.py create mode 
100755 cinder/volume/drivers/storwize_svc.py create mode 100644 cinder/volume/drivers/windows.py create mode 100644 cinder/volume/drivers/xenapi/__init__.py create mode 100644 cinder/volume/drivers/xenapi/lib.py create mode 100644 cinder/volume/drivers/xenapi/sm.py create mode 100644 cinder/volume/drivers/xenapi/tools.py create mode 100644 cinder/volume/drivers/xiv.py create mode 100644 cinder/volume/drivers/zadara.py create mode 100644 cinder/volume/manager.py create mode 100644 cinder/volume/rpcapi.py create mode 100644 cinder/volume/utils.py create mode 100644 cinder/volume/volume_types.py create mode 100644 cinder/wsgi.py create mode 100644 contrib/redhat-eventlet.patch create mode 100644 doc/.gitignore create mode 100644 doc/Makefile create mode 100644 doc/README.rst create mode 100644 doc/ext/__init__.py create mode 100644 doc/ext/cinder_autodoc.py create mode 100644 doc/ext/cinder_todo.py create mode 100755 doc/find_autodoc_modules.sh create mode 100755 doc/generate_autodoc_index.sh create mode 100644 doc/source/_ga/layout.html create mode 100644 doc/source/_static/.gitignore create mode 100644 doc/source/_static/.placeholder create mode 100644 doc/source/_static/basic.css create mode 100644 doc/source/_static/default.css create mode 100644 doc/source/_static/jquery.tweet.js create mode 100644 doc/source/_static/tweaks.css create mode 100644 doc/source/_templates/.gitignore create mode 100644 doc/source/_templates/.placeholder create mode 100644 doc/source/_theme/layout.html create mode 100644 doc/source/_theme/theme.conf create mode 100644 doc/source/conf.py create mode 100644 doc/source/devref/addmethod.openstackapi.rst create mode 100644 doc/source/devref/api.rst create mode 100644 doc/source/devref/architecture.rst create mode 100644 doc/source/devref/auth.rst create mode 100644 doc/source/devref/cinder.rst create mode 100644 doc/source/devref/database.rst create mode 100644 doc/source/devref/development.environment.rst create mode 100644 
doc/source/devref/fakes.rst create mode 100644 doc/source/devref/gerrit.rst create mode 100644 doc/source/devref/il8n.rst create mode 100644 doc/source/devref/index.rst create mode 100644 doc/source/devref/jenkins.rst create mode 100644 doc/source/devref/launchpad.rst create mode 100644 doc/source/devref/rpc.rst create mode 100644 doc/source/devref/scheduler.rst create mode 100644 doc/source/devref/services.rst create mode 100644 doc/source/devref/threading.rst create mode 100644 doc/source/devref/unit_tests.rst create mode 100644 doc/source/devref/volume.rst create mode 100644 doc/source/images/rpc/arch.png create mode 100644 doc/source/images/rpc/arch.svg create mode 100644 doc/source/images/rpc/flow1.png create mode 100644 doc/source/images/rpc/flow1.svg create mode 100644 doc/source/images/rpc/flow2.png create mode 100644 doc/source/images/rpc/flow2.svg create mode 100644 doc/source/images/rpc/rabt.png create mode 100644 doc/source/images/rpc/rabt.svg create mode 100644 doc/source/images/rpc/state.png create mode 100644 doc/source/index.rst create mode 100644 doc/source/man/cinder-manage.rst create mode 100644 etc/cinder/api-paste.ini create mode 100644 etc/cinder/cinder.conf.sample create mode 100644 etc/cinder/logging_sample.conf create mode 100644 etc/cinder/policy.json create mode 100644 etc/cinder/rootwrap.conf create mode 100644 etc/cinder/rootwrap.d/share.filters create mode 100644 etc/cinder/rootwrap.d/volume.filters create mode 100644 openstack-common.conf create mode 100644 pylintrc create mode 100755 run_tests.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tools/conf/extract_opts.py create mode 100755 tools/conf/generate_sample.sh create mode 100755 tools/enable-pre-commit-hook.sh create mode 100644 tools/install_venv.py create mode 100644 tools/install_venv_common.py create mode 100755 tools/lintstack.py create mode 100755 tools/lintstack.sh create mode 100644 tools/patch_tox_venv.py create mode 100644 
tools/pip-requires create mode 100644 tools/test-requires create mode 100755 tools/with_venv.sh create mode 100644 tox.ini diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..836c0a0234 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,12 @@ +If you would like to contribute to the development of OpenStack, +you must follow the steps in the "If you're a developer, start here" +section of this page: [http://wiki.openstack.org/HowToContribute](http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer.2C_start_here:) + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at [http://wiki.openstack.org/GerritWorkflow](http://wiki.openstack.org/GerritWorkflow). + +Pull requests submitted through GitHub will be ignored. + +Bugs should be filed [on Launchpad](https://bugs.launchpad.net/cinder), +not in GitHub's issue tracker. diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 0000000000..eecb286b51 --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,275 @@ +Cinder Style Commandments +========================= + +- Step 1: Read http://www.python.org/dev/peps/pep-0008/ +- Step 2: Read http://www.python.org/dev/peps/pep-0008/ again +- Step 3: Read on + + +General +------- +- Put two newlines between top-level code (funcs, classes, etc) +- Put one newline between methods in classes and anywhere else +- Long lines should be wrapped in parentheses + in preference to using a backslash for line continuation. +- Do not write "except:", use "except Exception:" at the very least +- Include your name with TODOs as in "#TODO(termie)" +- Do not shadow a built-in or reserved word. 
Example:: + + def list(): + return [1, 2, 3] + + mylist = list() # BAD, shadows `list` built-in + + class Foo(object): + def list(self): + return [1, 2, 3] + + mylist = Foo().list() # OKAY, does not shadow built-in + +- Use the "is not" operator when testing for unequal identities. Example:: + + if not X is Y: # BAD, intended behavior is ambiguous + pass + + if X is not Y: # OKAY, intuitive + pass + +- Use the "not in" operator for evaluating membership in a collection. Example:: + + if not X in Y: # BAD, intended behavior is ambiguous + pass + + if X not in Y: # OKAY, intuitive + pass + + if not (X in Y or X in Z): # OKAY, still better than all those 'not's + pass + + +Imports +------- +- Do not import objects, only modules (*) +- Do not import more than one module per line (*) +- Do not make relative imports +- Order your imports by the full module path +- Organize your imports according to the following template + +(*) exceptions are: + +- imports from ``migrate`` package +- imports from ``sqlalchemy`` package +- imports from ``cinder.db.sqlalchemy.session`` module + +Example:: + + # vim: tabstop=4 shiftwidth=4 softtabstop=4 + {{stdlib imports in human alphabetical order}} + \n + {{third-party lib imports in human alphabetical order}} + \n + {{cinder imports in human alphabetical order}} + \n + \n + {{begin your code}} + + +Human Alphabetical Order Examples +--------------------------------- +Example:: + + import httplib + import logging + import random + import StringIO + import time + import unittest + + import eventlet + import webob.exc + + import cinder.api.ec2 + from cinder.api import openstack + from cinder.auth import users + from cinder.endpoint import cloud + import cinder.flags + from cinder import test + + +Docstrings +---------- +Example:: + + """A one line docstring looks like this and ends in a period.""" + + + """A multi line docstring has a one-line summary, less than 80 characters. 
+ + Then a new paragraph after a newline that explains in more detail any + general information about the function, class or method. Example usages + are also great to have here if it is a complex class or function. + + When writing the docstring for a class, an extra line should be placed + after the closing quotations. For more in-depth explanations for these + decisions see http://www.python.org/dev/peps/pep-0257/ + + If you are going to describe parameters and return values, use Sphinx; the + appropriate syntax is as follows. + + :param foo: the foo parameter + :param bar: the bar parameter + :returns: return_type -- description of the return value + :returns: description of the return value + :raises: AttributeError, KeyError + """ + + +Dictionaries/Lists +------------------ +If a dictionary (dict) or list object is longer than 80 characters, its items +should be split with newlines. Embedded iterables should have their items +indented. Additionally, the last item in the dictionary should have a trailing +comma. This increases readability and simplifies future diffs. + +Example:: + + my_dictionary = { + "image": { + "name": "Just a Snapshot", + "size": 2749573, + "properties": { + "user_id": 12, + "arch": "x86_64", + }, + "things": [ + "thing_one", + "thing_two", + ], + "status": "ACTIVE", + }, + } + + +Calling Methods +--------------- +Calls to methods 80 characters or longer should format each argument with +newlines. 
This is not a requirement, but a guideline:: + + unnecessarily_long_function_name('string one', + 'string two', + kwarg1=constants.ACTIVE, + kwarg2=['a', 'b', 'c']) + + +Rather than constructing parameters inline, it is better to break things up:: + + list_of_strings = [ + 'what_a_long_string', + 'not as long', + ] + + dict_of_numbers = { + 'one': 1, + 'two': 2, + 'twenty four': 24, + } + + object_one.call_a_method('string three', + 'string four', + kwarg1=list_of_strings, + kwarg2=dict_of_numbers) + + +Internationalization (i18n) Strings +----------------------------------- +In order to support multiple languages, we have a mechanism to support +automatic translations of exception and log strings. + +Example:: + + msg = _("An error occurred") + raise HTTPBadRequest(explanation=msg) + +If you have a variable to place within the string, first internationalize the +template string then do the replacement. + +Example:: + + msg = _("Missing parameter: %s") % ("flavor",) + LOG.error(msg) + +If you have multiple variables to place in the string, use keyword parameters. +This helps our translators reorder parameters when needed. + +Example:: + + msg = _("The server with id %(s_id)s has no key %(m_key)s") + LOG.error(msg % {"s_id": "1234", "m_key": "imageId"}) + + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. + +For more information on creating unit tests and utilizing the testing +infrastructure in OpenStack Cinder, please read cinder/testing/README.rst. + + +openstack-common +---------------- + +A number of modules from openstack-common are imported into the project. 
+ +These modules are "incubating" in openstack-common and are kept in sync +with the help of openstack-common's update.py script. See: + + http://wiki.openstack.org/CommonLibrary#Incubation + +The copy of the code should never be directly modified here. Please +always update openstack-common first and then run the script to copy +the changes across. + +OpenStack Trademark +------------------- + +OpenStack is a registered trademark of OpenStack, LLC, and uses the +following capitalization: + + OpenStack + + +Commit Messages +--------------- +Using a common format for commit messages will help keep our git history +readable. Follow these guidelines: + + First, provide a brief summary (it is recommended to keep the commit title + under 50 chars). + + The first line of the commit message should provide an accurate + description of the change, not just a reference to a bug or + blueprint. It must be followed by a single blank line. + + If the change relates to a specific driver (libvirt, xenapi, qpid, etc...), + begin the first line of the commit message with the driver name, lowercased, + followed by a colon. + + Following your brief summary, provide a more detailed description of + the patch, manually wrapping the text at 72 characters. This + description should provide enough detail that one does not have to + refer to external resources to determine its high-level functionality. + + Once you use 'git review', two lines will be appended to the commit + message: a blank line followed by a 'Change-Id'. This is important + to correlate this commit with a specific review in Gerrit, and it + should not be modified. 
+ +For further information on constructing high quality commit messages, +and how to split up commits into a series of changes, consult the +project wiki: + + http://wiki.openstack.org/GitCommitMessages diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..68c771a099 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..c978a52dae --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include AUTHORS +include ChangeLog +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000..822401a80a --- /dev/null +++ b/README.rst @@ -0,0 +1,21 @@ +The Choose Your Own Adventure README for Cinder +=============================================== + +You have come across a storage service for an open cloud computing service. +It has identified itself as "Cinder." It was abstracted from the Nova project. 
+ +To monitor it from a distance: follow `@openstack `_ on twitter. + +To tame it for use in your own cloud: read http://docs.openstack.org + +To study its anatomy: read http://cinder.openstack.org + +To dissect it in detail: visit http://github.com/openstack/cinder + +To taunt it with its weaknesses: use http://bugs.launchpad.net/cinder + +To watch it: http://jenkins.openstack.org + +To hack at it: read HACKING + +To cry over its pylint problems: http://jenkins.openstack.org/job/cinder-pylint/violations diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 0000000000..15cd6cb76b --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/cinder-all b/bin/cinder-all new file mode 100755 index 0000000000..9591d1574f --- /dev/null +++ b/bin/cinder-all @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack, LLC +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for All cinder services. + +This script attempts to start all the cinder services in one process. Each +service is started in its own greenthread. Please note that exceptions and +sys.exit() on the starting of a service are logged and the script will +continue attempting to launch the rest of the services. 
+ +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + LOG = logging.getLogger('cinder.all') + + utils.monkey_patch() + servers = [] + # cinder-api + try: + servers.append(service.WSGIService('osapi_volume')) + except (Exception, SystemExit): + LOG.exception(_('Failed to load osapi_volume')) + + for binary in ['cinder-volume', 'cinder-scheduler']: + try: + servers.append(service.Service.create(binary=binary)) + except (Exception, SystemExit): + LOG.exception(_('Failed to load %s'), binary) + service.serve(*servers) + service.wait() diff --git a/bin/cinder-api b/bin/cinder-api new file mode 100755 index 0000000000..0f05b519c8 --- /dev/null +++ b/bin/cinder-api @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Cinder OS API.""" + +# NOTE(jdg): If we port over multi worker code from Nova +# we'll need to set monkey_patch(os=False), unless +# eventlet is updated/released to fix the root issue + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + server = service.WSGIService('osapi_volume') + service.serve(server) + service.wait() diff --git a/bin/cinder-backup b/bin/cinder-backup new file mode 100755 index 0000000000..71f83006a2 --- /dev/null +++ b/bin/cinder-backup @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Starter script for Cinder Volume Backup.""" + +import os +import sys + +import eventlet + +eventlet.monkey_patch() + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + launcher = service.ProcessLauncher() + server = service.Service.create(binary='cinder-backup') + launcher.launch_server(server) + launcher.wait() diff --git a/bin/cinder-clear-rabbit-queues b/bin/cinder-clear-rabbit-queues new file mode 100755 index 0000000000..684a56578a --- /dev/null +++ b/bin/cinder-clear-rabbit-queues @@ -0,0 +1,76 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Admin/debug script to wipe rabbitMQ (AMQP) queues cinder uses. 
+ This can be used if you need to change durable options on queues, + or to wipe all messages in the queue system if things are in a + serious bad way. + +""" + +import datetime +import os +import sys +import time + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc + +delete_exchange_opt = \ + cfg.BoolOpt('delete_exchange', + default=False, + help='delete cinder exchange too.') + +FLAGS = flags.FLAGS +FLAGS.register_cli_opt(delete_exchange_opt) + + +def delete_exchange(exch): + conn = rpc.create_connection() + x = conn.get_channel() + x.exchange_delete(exch) + + +def delete_queues(queues): + conn = rpc.create_connection() + x = conn.get_channel() + for q in queues: + x.queue_delete(q) + +if __name__ == '__main__': + args = flags.parse_args(sys.argv) + logging.setup("cinder") + delete_queues(args[1:]) + if FLAGS.delete_exchange: + delete_exchange(FLAGS.control_exchange) diff --git a/bin/cinder-manage b/bin/cinder-manage new file mode 100755 index 0000000000..63f638fc43 --- /dev/null +++ b/bin/cinder-manage @@ -0,0 +1,820 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Interactive shell based on Django: +# +# Copyright (c) 2005, the Lawrence Journal-World +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of Django nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +""" + CLI interface for cinder management. +""" + +import os +import sys +import uuid + +from sqlalchemy import create_engine, MetaData, Table +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder.db import migration +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common import uuidutils +from cinder import utils +from cinder import version + +FLAGS = flags.FLAGS + + +# Decorators for actions +def args(*args, **kwargs): + def _decorator(func): + func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) + return func + return _decorator + + +def param2id(object_id): + """Helper function to convert various id types to internal id. + args: [object_id], e.g. 
'vol-0000000a' or 'volume-0000000a' or '10' + """ + if uuidutils.is_uuid_like(object_id): + return object_id + elif '-' in object_id: + # FIXME(ja): mapping occurs in nova? + pass + else: + return int(object_id) + + +class ShellCommands(object): + def bpython(self): + """Runs a bpython shell. + + Falls back to Ipython/python shell if unavailable""" + self.run('bpython') + + def ipython(self): + """Runs an Ipython shell. + + Falls back to Python shell if unavailable""" + self.run('ipython') + + def python(self): + """Runs a python shell. + + Falls back to Python shell if unavailable""" + self.run('python') + + @args('--shell', dest="shell", + metavar='', + help='Python shell') + def run(self, shell=None): + """Runs a Python interactive interpreter.""" + if not shell: + shell = 'bpython' + + if shell == 'bpython': + try: + import bpython + bpython.embed() + except ImportError: + shell = 'ipython' + if shell == 'ipython': + try: + import IPython + # Explicitly pass an empty list as arguments, because + # otherwise IPython would use sys.argv from this script. + shell = IPython.Shell.IPShell(argv=[]) + shell.mainloop() + except ImportError: + shell = 'python' + + if shell == 'python': + import code + try: + # Try activating rlcompleter, because it's handy. + import readline + except ImportError: + pass + else: + # We don't have to wrap the following import in a 'try', + # because we already know 'readline' was imported successfully. + import rlcompleter + readline.parse_and_bind("tab:complete") + code.interact() + + @args('--path', required=True, help='Script path') + def script(self, path): + """Runs the script from the specifed path with flags set properly. 
+ arguments: path""" + exec(compile(open(path).read(), path, 'exec'), locals(), globals()) + + +def _db_error(caught_exception): + print caught_exception + print _("The above error may show that the database has not " + "been created.\nPlease create a database using " + "'cinder-manage db sync' before running this command.") + exit(1) + + +class HostCommands(object): + """List hosts.""" + + @args('zone', nargs='?', default=None, + help='Availability Zone (default: %(default)s)') + def list(self, zone=None): + """Show a list of all physical hosts. Filter by zone. + args: [zone]""" + print "%-25s\t%-15s" % (_('host'), + _('zone')) + ctxt = context.get_admin_context() + services = db.service_get_all(ctxt) + if zone: + services = [s for s in services if s['availability_zone'] == zone] + hosts = [] + for srv in services: + if not [h for h in hosts if h['host'] == srv['host']]: + hosts.append(srv) + + for h in hosts: + print "%-25s\t%-15s" % (h['host'], h['availability_zone']) + + +class DbCommands(object): + """Class for managing the database.""" + + def __init__(self): + pass + + @args('version', nargs='?', default=None, + help='Database version') + def sync(self, version=None): + """Sync the database up to the most recent version.""" + return migration.db_sync(version) + + def version(self): + """Print the current database version.""" + print migration.db_version() + + +class VersionCommands(object): + """Class for exposing the codebase version.""" + + def __init__(self): + pass + + def list(self): + print(version.version_string()) + + def __call__(self): + self.list() + + +class ImportCommands(object): + """Methods for importing Nova volumes to Cinder. + + EXPECTATIONS: + These methods will do two things: + 1. Import relevant Nova DB info in to Cinder + 2. Import persistent tgt files from Nova to Cinder (see copy_tgt_files) + + If you're using VG's (local storage) for your backend YOU MUST install + Cinder on the same node that you're migrating from. 
+ """ + def __init__(self): + pass + + def _map_table(self, table): + class Mapper(declarative_base()): + __table__ = table + return Mapper + + def _open_session(self, con_info): + # Note(jdg): The echo option below sets whether to dispaly db command + # debug info. + engine = create_engine(con_info, + convert_unicode=True, + echo=False) + session = sessionmaker(bind=engine) + return (session(), engine) + + def _backup_cinder_db(self): + #First, dump the dest_db as a backup incase this goes wrong + cinder_dump = utils.execute('mysqldump', 'cinder') + if 'Dump completed on' in cinder_dump[0]: + with open('./cinder_db_bkup.sql', 'w+') as fo: + for line in cinder_dump: + fo.write(line) + else: + raise exception.InvalidResults() + + def _import_db(self, src_db, dest_db, backup_db): + # Remember order matters due to FK's + table_list = ['sm_flavors', + 'sm_backend_config', + 'snapshots', + 'volume_types', + 'volumes', + 'iscsi_targets', + 'sm_volume', + 'volume_metadata', + 'volume_type_extra_specs'] + + quota_table_list = ['quota_classes', + 'quota_usages', + 'quotas', + 'reservations'] + + if backup_db > 0: + if 'mysql:' not in dest_db: + print (_('Sorry, only mysql backups are supported!')) + raise exception.InvalidRequest() + else: + self._backup_cinder_db() + + (src, src_engine) = self._open_session(src_db) + src_meta = MetaData(bind=src_engine) + (dest, dest_engine) = self._open_session(dest_db) + + # First make sure nova is at Folsom + table = Table('migrate_version', src_meta, autoload=True) + if src.query(table).first().version < 132: + print (_('ERROR: Specified Nova DB is not at a compatible ' + 'migration version!\nNova must be at Folsom or newer ' + 'to import into Cinder database.')) + sys.exit(2) + + for table_name in table_list: + print (_('Importing table %s...') % table_name) + table = Table(table_name, src_meta, autoload=True) + new_row = self._map_table(table) + columns = table.columns.keys() + for row in src.query(table).all(): + data = 
dict([(str(column), getattr(row, column)) + for column in columns]) + dest.add(new_row(**data)) + dest.commit() + + for table_name in quota_table_list: + print (_('Importing table %s...') % table_name) + table = Table(table_name, src_meta, autoload=True) + new_row = self._map_table(table) + columns = table.columns.keys() + for row in src.query(table).all(): + if row.resource == 'gigabytes' or row.resource == 'volumes': + data = dict([(str(column), getattr(row, column)) + for column in columns]) + dest.add(new_row(**data)) + dest.commit() + + @args('src', metavar='', + help='db-engine://db_user[:passwd]@db_host[:port]\t\t' + 'example: mysql://root:secrete@192.168.137.1') + @args('dest', metavar='', + help='db-engine://db_user[:passwd]@db_host[:port]\t\t' + 'example: mysql://root:secrete@192.168.137.1') + @args('--backup', metavar='<0|1>', choices=[0, 1], default=1, + help='Perform mysqldump of cinder db before writing to it' + ' (default: %(default)d)') + def import_db(self, src_db, dest_db, backup_db=1): + """Import relevant volume DB entries from Nova into Cinder. + + NOTE: + Your Cinder DB should be clean WRT volume entries. + + NOTE: + We take an sqldump of the cinder DB before mods + If you're not using mysql, set backup_db=0 + and create your own backup. + """ + src_db = '%s/nova' % src_db + dest_db = '%s/cinder' % dest_db + self._import_db(src_db, dest_db, backup_db) + + @args('src', + help='e.g. (login@src_host:]/opt/stack/nova/volumes/)') + @args('dest', nargs='?', default=None, + help='e.g. (login@src_host:/opt/stack/cinder/volumes/) ' + 'optional, if emitted, \'volume_dir\' in config will be used') + def copy_ptgt_files(self, src_tgts, dest_tgts=None): + """Copy persistent scsi tgt files from nova to cinder. + + Default destination is FLAGS.volume_dir or state_path/volumes/ + + PREREQUISITES: + Persistent tgts were introduced in Folsom. If you're running + Essex or other release, this script is unnecessary. 
+ + NOTE: + If you're using local VG's and LVM for your nova volume backend + there's no point in copying these files over. Leave them on + your Nova system as they won't do any good here. + """ + if dest_tgts is None: + try: + dest_tgts = FLAGS.volumes_dir + except Exception: + dest_tgts = '%s/volumes' % FLAGS.state_path + + utils.execute('rsync', '-avz', src_tgts, dest_tgts) + + +class VolumeCommands(object): + """Methods for dealing with a cloud in an odd state.""" + + @args('volume_id', + help='Volume ID to be deleted') + def delete(self, volume_id): + """Delete a volume, bypassing the check that it + must be available.""" + ctxt = context.get_admin_context() + volume = db.volume_get(ctxt, param2id(volume_id)) + host = volume['host'] + + if not host: + print "Volume not yet assigned to host." + print "Deleting volume from database and skipping rpc." + db.volume_destroy(ctxt, param2id(volume_id)) + return + + if volume['status'] == 'in-use': + print "Volume is in-use." + print "Detach volume from instance and then try again." + return + + rpc.cast(ctxt, + rpc.queue_get_for(ctxt, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume['id']}}) + + @args('volume_id', + help='Volume ID to be reattached') + def reattach(self, volume_id): + """Re-attach a volume that has previously been attached + to an instance. 
Typically called after a compute host + has been rebooted.""" + ctxt = context.get_admin_context() + volume = db.volume_get(ctxt, param2id(volume_id)) + if not volume['instance_id']: + print "volume is not attached to an instance" + return + instance = db.instance_get(ctxt, volume['instance_id']) + host = instance['host'] + rpc.cast(ctxt, + rpc.queue_get_for(ctxt, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"instance_id": instance['id'], + "volume_id": volume['id'], + "mountpoint": volume['mountpoint']}}) + + +class StorageManagerCommands(object): + """Class for mangaging Storage Backends and Flavors.""" + + @args('flavor', nargs='?', + help='flavor to be listed') + def flavor_list(self, flavor=None): + ctxt = context.get_admin_context() + + try: + if flavor is None: + flavors = db.sm_flavor_get_all(ctxt) + else: + flavors = db.sm_flavor_get(ctxt, flavor) + except exception.NotFound as ex: + print "error: %s" % ex + sys.exit(2) + + print "%-18s\t%-20s\t%s" % (_('id'), + _('Label'), + _('Description')) + + for flav in flavors: + print "%-18s\t%-20s\t%s" % ( + flav['id'], + flav['label'], + flav['description']) + + @args('label', help='flavor label') + @args('desc', help='flavor description') + def flavor_create(self, label, desc): + # TODO(renukaapte) flavor name must be unique + try: + db.sm_flavor_create(context.get_admin_context(), + dict(label=label, + description=desc)) + except exception.DBError, e: + _db_error(e) + + @args('label', help='label of flavor to be deleted') + def flavor_delete(self, label): + try: + db.sm_flavor_delete(context.get_admin_context(), label) + + except exception.DBError, e: + _db_error(e) + + def _splitfun(self, item): + i = item.split("=") + return i[0:2] + + @args('backend_conf_id', nargs='?', default=None) + def backend_list(self, backend_conf_id=None): + ctxt = context.get_admin_context() + + try: + if backend_conf_id is None: + backends = db.sm_backend_conf_get_all(ctxt) + else: + backends = 
db.sm_backend_conf_get(ctxt, backend_conf_id) + + except exception.NotFound as ex: + print "error: %s" % ex + sys.exit(2) + + print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'), + _('Flavor id'), + _('SR UUID'), + _('SR Type'), + _('Config Parameters'),) + + for b in backends: + print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'], + b['flavor_id'], + b['sr_uuid'], + b['sr_type'], + b['config_params'],) + + @args('flavor_label') + @args('sr_type') + @args('args', nargs='*') + def backend_add(self, flavor_label, sr_type, *args): + # TODO(renukaapte) Add backend_introduce. + ctxt = context.get_admin_context() + params = dict(map(self._splitfun, args)) + sr_uuid = uuid.uuid4() + + if flavor_label is None: + print "error: backend needs to be associated with flavor" + sys.exit(2) + + try: + flavors = db.sm_flavor_get(ctxt, flavor_label) + + except exception.NotFound as ex: + print "error: %s" % ex + sys.exit(2) + + config_params = " ".join( + ['%s=%s' % (key, params[key]) for key in params]) + + if 'sr_uuid' in params: + sr_uuid = params['sr_uuid'] + try: + backend = db.sm_backend_conf_get_by_sr(ctxt, sr_uuid) + except exception.DBError, e: + _db_error(e) + + if backend: + print 'Backend config found. Would you like to recreate this?' + print '(WARNING:Recreating will destroy all VDIs on backend!!)' + c = raw_input('Proceed? (y/n) ') + if c == 'y' or c == 'Y': + try: + db.sm_backend_conf_update( + ctxt, backend['id'], + dict(created=False, + flavor_id=flavors['id'], + sr_type=sr_type, + config_params=config_params)) + except exception.DBError, e: + _db_error(e) + return + + else: + print 'Backend config not found. Would you like to create it?' + + print '(WARNING: Creating will destroy all data on backend!!!)' + c = raw_input('Proceed? 
(y/n) ') + if c == 'y' or c == 'Y': + try: + db.sm_backend_conf_create(ctxt, + dict(flavor_id=flavors['id'], + sr_uuid=sr_uuid, + sr_type=sr_type, + config_params=config_params)) + except exception.DBError, e: + _db_error(e) + + @args('backend_conf_id') + def backend_remove(self, backend_conf_id): + try: + db.sm_backend_conf_delete(context.get_admin_context(), + backend_conf_id) + + except exception.DBError, e: + _db_error(e) + + +class ConfigCommands(object): + """Class for exposing the flags defined by flag_file(s).""" + + def __init__(self): + pass + + def list(self): + for key, value in FLAGS.iteritems(): + if value is not None: + print '%s = %s' % (key, value) + + +class GetLogCommands(object): + """Get logging information.""" + + def errors(self): + """Get all of the errors from the log files.""" + error_found = 0 + if FLAGS.log_dir: + logs = [x for x in os.listdir(FLAGS.log_dir) if x.endswith('.log')] + for file in logs: + log_file = os.path.join(FLAGS.log_dir, file) + lines = [line.strip() for line in open(log_file, "r")] + lines.reverse() + print_name = 0 + for index, line in enumerate(lines): + if line.find(" ERROR ") > 0: + error_found += 1 + if print_name == 0: + print log_file + ":-" + print_name = 1 + print "Line %d : %s" % (len(lines) - index, line) + if error_found == 0: + print "No errors in logfiles!" + + @args('num_entries', nargs='?', type=int, default=10, + help='Number of entries to list (default: %(default)d)') + def syslog(self, num_entries=10): + """Get of the cinder syslog events.""" + entries = int(num_entries) + count = 0 + log_file = '' + if os.path.exists('/var/log/syslog'): + log_file = '/var/log/syslog' + elif os.path.exists('/var/log/messages'): + log_file = '/var/log/messages' + else: + print "Unable to find system log file!" 
+ sys.exit(1) + lines = [line.strip() for line in open(log_file, "r")] + lines.reverse() + print "Last %s cinder syslog entries:-" % (entries) + for line in lines: + if line.find("cinder") > 0: + count += 1 + print "%s" % (line) + if count == entries: + break + + if count == 0: + print "No cinder entries in syslog!" + + +class BackupCommands(object): + """Methods for managing backups.""" + + def list(self): + """List all backups (including ones in progress) and the host + on which the backup operation is running.""" + ctxt = context.get_admin_context() + backups = db.backup_get_all(ctxt) + + hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s" + print hdr % (_('ID'), + _('User ID'), + _('Project ID'), + _('Host'), + _('Name'), + _('Container'), + _('Status'), + _('Size'), + _('Object Count')) + + res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d" + for backup in backups: + object_count = 0 + if backup['object_count'] is not None: + object_count = backup['object_count'] + print res % (backup['id'], + backup['user_id'], + backup['project_id'], + backup['host'], + backup['display_name'], + backup['container'], + backup['status'], + backup['size'], + object_count) + + +class ServiceCommands(object): + """Methods for managing services.""" + def list(self): + """Show a list of all cinder services.""" + ctxt = context.get_admin_context() + services = db.service_get_all(ctxt) + print_format = "%-16s %-36s %-16s %-10s %-5s %-10s" + print print_format % ( + _('Binary'), + _('Host'), + _('Zone'), + _('Status'), + _('State'), + _('Updated At')) + for svc in services: + alive = utils.service_is_up(svc) + art = ":-)" if alive else "XXX" + status = 'enabled' + if svc['disabled']: + status = 'disabled' + print print_format % (svc['binary'], svc['host'].partition('.')[0], + svc['availability_zone'], status, art, + svc['updated_at']) + + +CATEGORIES = { + 'backup': BackupCommands, + 'config': ConfigCommands, + 'db': DbCommands, + 'host': 
HostCommands, + 'logs': GetLogCommands, + 'service': ServiceCommands, + 'shell': ShellCommands, + 'sm': StorageManagerCommands, + 'version': VersionCommands, + 'volume': VolumeCommands, + 'migrate': ImportCommands, +} + + +def methods_of(obj): + """Get all callable methods of an object that don't start with underscore + returns a list of tuples of the form (method_name, method)""" + result = [] + for i in dir(obj): + if callable(getattr(obj, i)) and not i.startswith('_'): + result.append((i, getattr(obj, i))) + return result + + +def add_command_parsers(subparsers): + for category in CATEGORIES: + command_object = CATEGORIES[category]() + + parser = subparsers.add_parser(category) + parser.set_defaults(command_object=command_object) + + category_subparsers = parser.add_subparsers(dest='action') + + for (action, action_fn) in methods_of(command_object): + parser = category_subparsers.add_parser(action) + + action_kwargs = [] + for args, kwargs in getattr(action_fn, 'args', []): + parser.add_argument(*args, **kwargs) + + parser.set_defaults(action_fn=action_fn) + parser.set_defaults(action_kwargs=action_kwargs) + + +category_opt = cfg.SubCommandOpt('category', + title='Command categories', + handler=add_command_parsers) + + +def get_arg_string(args): + arg = None + if args[0] == '-': + # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars + # is optional args. Notice that cfg module takes care of + # actual ArgParser so prefix_chars is always '-'. 
+ if args[1] == '-': + # This is long optional arg + arg = args[2:] + else: + arg = args[3:] + else: + arg = args + + return arg + + +def fetch_func_args(func): + fn_args = [] + for args, kwargs in getattr(func, 'args', []): + arg = get_arg_string(args[0]) + fn_args.append(getattr(FLAGS.category, arg)) + + return fn_args + + +def main(): + """Parse options and call the appropriate class/method.""" + FLAGS.register_cli_opt(category_opt) + script_name = sys.argv[0] + if len(sys.argv) < 2: + print(_("\nOpenStack Cinder version: %(version)s\n") % + {'version': version.version_string()}) + print script_name + " category action [<args>]" + print _("Available categories:") + for category in CATEGORIES: + print "\t%s" % category + sys.exit(2) + + try: + flags.parse_args(sys.argv) + logging.setup("cinder") + except cfg.ConfigFilesNotFoundError: + cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None + if cfgfile and not os.access(cfgfile, os.R_OK): + st = os.stat(cfgfile) + print _("Could not read %s. Re-running with sudo") % cfgfile + try: + os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) + except Exception: + print _('sudo failed, continuing as if nothing happened') + + print _('Please re-run cinder-manage as root.') + sys.exit(2) + + fn = FLAGS.category.action_fn + + fn_args = fetch_func_args(fn) + fn(*fn_args) + +if __name__ == '__main__': + main() diff --git a/bin/cinder-rootwrap b/bin/cinder-rootwrap new file mode 100755 index 0000000000..cb2a92f5ea --- /dev/null +++ b/bin/cinder-rootwrap @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Root wrapper for OpenStack services + + Filters which commands a service is allowed to run as another user. + + To use this with cinder, you should set the following in + cinder.conf: + rootwrap_config=/etc/cinder/rootwrap.conf + + You also need to let the cinder user run cinder-rootwrap + as root in sudoers: + cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap + /etc/cinder/rootwrap.conf * + + Service packaging should deploy .filters files only on nodes where + they are needed, to avoid allowing more than is necessary. +""" + +import ConfigParser +import logging +import os +import pwd +import signal +import subprocess +import sys + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 +RC_BADCONFIG = 97 +RC_NOEXECFOUND = 96 + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def _exit_error(execname, message, errorcode, log=True): + print "%s: %s" % (execname, message) + if log: + logging.error(message) + sys.exit(errorcode) + + +if __name__ == '__main__': + # Split arguments, require at least a command + execname = sys.argv.pop(0) + if len(sys.argv) < 2: + _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) + + configfile = sys.argv.pop(0) + userargs = sys.argv[:] + + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from cinder.openstack.common.rootwrap import wrapper + + # Load configuration + try: + rawconfig = ConfigParser.RawConfigParser() + rawconfig.read(configfile) + config = wrapper.RootwrapConfig(rawconfig) + except ValueError as exc: + msg = "Incorrect value in %s: %s" % (configfile, exc.message) + _exit_error(execname, msg, RC_BADCONFIG, log=False) + except ConfigParser.Error: + _exit_error(execname, "Incorrect configuration file: %s" % configfile, + RC_BADCONFIG, log=False) + + if config.use_syslog: + wrapper.setup_syslog(execname, + config.syslog_log_facility, + config.syslog_log_level) + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters(config.filters_path) + try: + filtermatch = wrapper.match_filter(filters, userargs, + exec_dirs=config.exec_dirs) + if filtermatch: + command = filtermatch.get_command(userargs, + exec_dirs=config.exec_dirs) + if config.use_syslog: + logging.info("(%s > %s) Executing %s (filter match = %s)" % ( + os.getlogin(), pwd.getpwuid(os.getuid())[0], + command, filtermatch.name)) + + obj = subprocess.Popen(command, + stdin=sys.stdin, + stdout=sys.stdout, + stderr=sys.stderr, + preexec_fn=_subprocess_setup, + env=filtermatch.get_environment(userargs)) + obj.wait() + 
sys.exit(obj.returncode) + + except wrapper.FilterMatchNotExecutable as exc: + msg = ("Executable not found: %s (filter match = %s)" + % (exc.match.exec_path, exc.match.name)) + _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) + + except wrapper.NoFilterMatched: + msg = ("Unauthorized command: %s (no filter matched)" + % ' '.join(userargs)) + _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) diff --git a/bin/cinder-rpc-zmq-receiver b/bin/cinder-rpc-zmq-receiver new file mode 100755 index 0000000000..e8dbf0effe --- /dev/null +++ b/bin/cinder-rpc-zmq-receiver @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +eventlet.monkey_patch() + +import contextlib +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import impl_zmq + +CONF = cfg.CONF +CONF.register_opts(rpc.rpc_opts) +CONF.register_opts(impl_zmq.zmq_opts) + + +def main(): + CONF(sys.argv[1:], project='cinder') + logging.setup("cinder") + + with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: + reactor.consume_in_thread() + reactor.wait() + +if __name__ == '__main__': + main() diff --git a/bin/cinder-scheduler b/bin/cinder-scheduler new file mode 100755 index 0000000000..28edd8bbf9 --- /dev/null +++ b/bin/cinder-scheduler @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Scheduler.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + server = service.Service.create(binary='cinder-scheduler') + service.serve(server) + service.wait() diff --git a/bin/cinder-share b/bin/cinder-share new file mode 100755 index 0000000000..b3ba88d398 --- /dev/null +++ b/bin/cinder-share @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Share.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +FLAGS = flags.FLAGS + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + launcher = service.ProcessLauncher() + if FLAGS.enabled_share_backends: + for backend in FLAGS.enabled_share_backends: + host = "%s@%s" % (FLAGS.host, backend) + server = service.Service.create( + host=host, + service_name=backend) + launcher.launch_server(server) + else: + server = service.Service.create(binary='cinder-share') + launcher.launch_server(server) + launcher.wait() diff --git a/bin/cinder-volume b/bin/cinder-volume new file mode 100755 index 0000000000..9c36238d66 --- /dev/null +++ b/bin/cinder-volume @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Starter script for Cinder Volume.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +FLAGS = flags.FLAGS + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + launcher = service.ProcessLauncher() + if FLAGS.enabled_backends: + for backend in FLAGS.enabled_backends: + host = "%s@%s" % (FLAGS.host, backend) + server = service.Service.create( + host=host, + service_name=backend) + launcher.launch_server(server) + else: + server = service.Service.create(binary='cinder-volume') + launcher.launch_server(server) + launcher.wait() diff --git a/bin/cinder-volume-usage-audit b/bin/cinder-volume-usage-audit new file mode 100755 index 0000000000..13ea447a9f --- /dev/null +++ b/bin/cinder-volume-usage-audit @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cron script to generate usage notifications for volumes existing during + the audit period. + + Together with the notifications generated by volumes + create/delete/resize, over that time period, this allows an external + system consuming usage notification feeds to calculate volume usage + for each tenant. + + Time periods are specified as 'hour', 'month', 'day' or 'year' + + hour = previous hour. If run at 9:07am, will generate usage for 8-9am. + month = previous month. If the script is run April 1, it will generate + usages for March 1 through March 31. + day = previous day. if run on July 4th, it generates usages for July 3rd. + year = previous year. If run on Jan 1, it generates usages for + Jan 1 through Dec 31 of the previous year. +""" + +import os +import sys +import traceback + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import context +from cinder import db +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder import utils +import cinder.volume.utils + + +FLAGS = flags.FLAGS + +if __name__ == '__main__': + admin_context = context.get_admin_context() + flags.parse_args(sys.argv) + logging.setup("cinder") + begin, end = utils.last_completed_audit_period() + print _("Starting volume usage audit") + msg = _("Creating usages for %(begin_period)s until %(end_period)s") + print (msg % {"begin_period": str(begin), "end_period": str(end)}) + + extra_info = { + 'audit_period_beginning': str(begin), + 'audit_period_ending': str(end), + } + + volumes = db.volume_get_active_by_window(admin_context, + begin, + end) + print _("Found %d volumes") % len(volumes) + for volume_ref in volumes: + try: + cinder.volume.utils.notify_usage_exists( + admin_context, volume_ref) + except Exception, e: + print traceback.format_exc(e) + + snapshots = db.snapshot_get_active_by_window(admin_context, + begin, + end) + print _("Found %d snapshots") % len(snapshots) + for snapshot_ref in snapshots: + try: + cinder.volume.utils.notify_about_snapshot_usage(admin_context, + snapshot_ref, + 'exists', + extra_info) + except Exception, e: + print traceback.format_exc(e) + + print _("Volume usage audit completed") diff --git a/cinder/__init__.py b/cinder/__init__.py new file mode 100644 index 0000000000..d765b088e9 --- /dev/null +++ b/cinder/__init__.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and 
Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder` -- Cloud IaaS Platform +=================================== + +.. automodule:: cinder + :platform: Unix + :synopsis: Infrastructure-as-a-Service Cloud platform. +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" diff --git a/cinder/api/__init__.py b/cinder/api/__init__.py new file mode 100644 index 0000000000..fc348ac565 --- /dev/null +++ b/cinder/api/__init__.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import paste.urlmap + +from cinder import flags + + +FLAGS = flags.FLAGS + + +def root_app_factory(loader, global_conf, **local_conf): + if not FLAGS.enable_v1_api: + del local_conf['/v1'] + if not FLAGS.enable_v2_api: + del local_conf['/v2'] + return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/auth.py b/cinder/api/auth.py new file mode 100644 index 0000000000..ac673cf71f --- /dev/null +++ b/cinder/api/auth.py @@ -0,0 +1,36 @@ +# Copyright (c) 2013 OpenStack, LLC. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.middleware import auth +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class CinderKeystoneContext(auth.CinderKeystoneContext): + def __init__(self, application): + LOG.warn(_('cinder.api.auth:CinderKeystoneContext is deprecated. ' + 'Please use ' + 'cinder.api.middleware.auth:CinderKeystoneContext ' + 'instead.')) + super(CinderKeystoneContext, self).__init__(application) + + +def pipeline_factory(loader, global_conf, **local_conf): + LOG.warn(_('cinder.api.auth:pipeline_factory is deprecated. 
Please use ' + 'cinder.api.middleware.auth:pipeline_factory instead.')) + auth.pipeline_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/common.py b/cinder/api/common.py new file mode 100644 index 0000000000..94bf594c00 --- /dev/null +++ b/cinder/api/common.py @@ -0,0 +1,314 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import re +import urlparse + +import webob + +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1' + + +def get_pagination_params(request): + """Return marker, limit tuple from request. + + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If 'limit' is not specified, 0, or + > max_limit, we default to max_limit. Negative values + for either marker or limit will cause + exc.HTTPBadRequest() exceptions to be raised. 
+ + """ + params = {} + if 'limit' in request.GET: + params['limit'] = _get_limit_param(request) + if 'marker' in request.GET: + params['marker'] = _get_marker_param(request) + return params + + +def _get_limit_param(request): + """Extract integer limit from request or fail""" + try: + limit = int(request.GET['limit']) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + return limit + + +def _get_marker_param(request): + """Extract marker id from request or fail""" + return request.GET['marker'] + + +def limited(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to requested offset and limit. + + :param items: A sliceable entity + :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. Negative values for either offset or limit + will cause exc.HTTPBadRequest() exceptions to be raised. 
+ :kwarg max_limit: The maximum number of items to return from 'items' + """ + try: + offset = int(request.GET.get('offset', 0)) + except ValueError: + msg = _('offset param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if offset < 0: + msg = _('offset param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + limit = min(max_limit, limit or max_limit) + range_end = offset + limit + return items[offset:range_end] + + +def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to the requested marker and limit.""" + params = get_pagination_params(request) + + limit = params.get('limit', max_limit) + marker = params.get('marker') + + limit = min(max_limit, limit) + start_index = 0 + if marker: + start_index = -1 + for i, item in enumerate(items): + if 'flavorid' in item: + if item['flavorid'] == marker: + start_index = i + 1 + break + elif item['id'] == marker or item.get('uuid') == marker: + start_index = i + 1 + break + if start_index < 0: + msg = _('marker [%s] not found') % marker + raise webob.exc.HTTPBadRequest(explanation=msg) + range_end = start_index + limit + return items[start_index:range_end] + + +def remove_version_from_href(href): + """Removes the first api version from the href. 
+ + Given: 'http://www.cinder.com/v1.1/123' + Returns: 'http://www.cinder.com/123' + + Given: 'http://www.cinder.com/v1.1' + Returns: 'http://www.cinder.com' + + """ + parsed_url = urlparse.urlsplit(href) + url_parts = parsed_url.path.split('/', 2) + + # NOTE: this should match vX.X or vX + expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') + if expression.match(url_parts[1]): + del url_parts[1] + + new_path = '/'.join(url_parts) + + if new_path == parsed_url.path: + msg = _('href %s does not contain version') % href + LOG.debug(msg) + raise ValueError(msg) + + parsed_url = list(parsed_url) + parsed_url[2] = new_path + return urlparse.urlunsplit(parsed_url) + + +def dict_to_query_str(params): + # TODO(throughnothing): we should just use urllib.urlencode instead of this + # But currently we don't work with urlencoded url's + param_str = "" + for key, val in params.iteritems(): + param_str = param_str + '='.join([str(key), str(val)]) + '&' + + return param_str.rstrip('&') + + +class ViewBuilder(object): + """Model API responses as dictionaries.""" + + _collection_name = None + + def _get_links(self, request, identifier): + return [{"rel": "self", + "href": self._get_href_link(request, identifier), }, + {"rel": "bookmark", + "href": self._get_bookmark_link(request, identifier), }] + + def _get_next_link(self, request, identifier): + """Return href string with proper limit and marker params.""" + params = request.params.copy() + params["marker"] = identifier + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_volume_base_URL) + url = os.path.join(prefix, + request.environ["cinder.context"].project_id, + self._collection_name) + return "%s?%s" % (url, dict_to_query_str(params)) + + def _get_href_link(self, request, identifier): + """Return an href string pointing to this object.""" + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_volume_base_URL) + return os.path.join(prefix, + 
request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_bookmark_link(self, request, identifier): + """Create a URL that refers to a specific resource.""" + base_url = remove_version_from_href(request.application_url) + base_url = self._update_link_prefix(base_url, + FLAGS.osapi_volume_base_URL) + return os.path.join(base_url, + request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_collection_links(self, request, items, id_key="uuid"): + """Retrieve 'next' link, if applicable.""" + links = [] + limit = int(request.params.get("limit", 0)) + if limit and limit == len(items): + last_item = items[-1] + if id_key in last_item: + last_item_id = last_item[id_key] + else: + last_item_id = last_item["id"] + links.append({ + "rel": "next", + "href": self._get_next_link(request, last_item_id), + }) + return links + + def _update_link_prefix(self, orig_url, prefix): + if not prefix: + return orig_url + url_parts = list(urlparse.urlsplit(orig_url)) + prefix_parts = list(urlparse.urlsplit(prefix)) + url_parts[0:2] = prefix_parts[0:2] + return urlparse.urlunsplit(url_parts) + + +class MetadataDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = utils.safe_minidom_parse_string(text) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + +class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = utils.safe_minidom_parse_string(text) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +class MetadataXMLDeserializer(wsgi.XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + if metadata_node is None: + return {} + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = 
meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + def _extract_metadata_container(self, datastring): + dom = utils.safe_minidom_parse_string(datastring) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + def create(self, datastring): + return self._extract_metadata_container(datastring) + + def update_all(self, datastring): + return self._extract_metadata_container(datastring) + + def update(self, datastring): + dom = utils.safe_minidom_parse_string(datastring) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +metadata_nsmap = {None: xmlutil.XMLNS_V11} + + +class MetaItemTemplate(xmlutil.TemplateBuilder): + def construct(self): + sel = xmlutil.Selector('meta', xmlutil.get_items, 0) + root = xmlutil.TemplateElement('meta', selector=sel) + root.set('key', 0) + root.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +class MetadataTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return True + + +class MetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = MetadataTemplateElement('metadata', selector='metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) diff --git a/cinder/api/contrib/__init__.py b/cinder/api/contrib/__init__.py new file mode 100644 index 0000000000..503bc90b8f --- /dev/null +++ b/cinder/api/contrib/__init__.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
"""Contrib contains extensions that are shipped with cinder.

It can't be called 'extensions' because that causes namespacing problems.

"""

from cinder.api import extensions
from cinder import flags
from cinder.openstack.common import log as logging


FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)


def standard_extensions(ext_mgr):
    """Load every extension found under this package into *ext_mgr*."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)


def select_extensions(ext_mgr):
    """Load only the extensions named in FLAGS.osapi_volume_ext_list."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
                                        FLAGS.osapi_volume_ext_list)
import webob
from webob import exc

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import volume


LOG = logging.getLogger(__name__)


class AdminController(wsgi.Controller):
    """Abstract base class for AdminControllers."""

    collection = None  # api collection to extend

    # FIXME(clayg): this will be hard to keep up-to-date
    # Concrete classes can expand or over-ride
    valid_status = set([
        'creating',
        'available',
        'deleting',
        'error',
        'error_deleting',
    ])

    def __init__(self, *args, **kwargs):
        super(AdminController, self).__init__(*args, **kwargs)
        # singular name of the resource
        self.resource_name = self.collection.rstrip('s')
        self.volume_api = volume.API()

    def _update(self, *args, **kwargs):
        raise NotImplementedError()

    def _get(self, *args, **kwargs):
        raise NotImplementedError()

    def _delete(self, *args, **kwargs):
        raise NotImplementedError()

    def validate_update(self, body):
        """Validate a reset-status body and return the update dict.

        :raises: webob.exc.HTTPBadRequest if 'status' is missing or not
                 one of ``valid_status``.
        """
        update = {}
        try:
            update['status'] = body['status']
        except (TypeError, KeyError):
            raise exc.HTTPBadRequest("Must specify 'status'")
        if update['status'] not in self.valid_status:
            raise exc.HTTPBadRequest("Must specify a valid status")
        return update

    def authorize(self, context, action_name):
        # e.g. "snapshot_admin_actions:reset_status"
        action = '%s_admin_actions:%s' % (self.resource_name, action_name)
        extensions.extension_authorizer('volume', action)(context)

    @wsgi.action('os-reset_status')
    def _reset_status(self, req, id, body):
        """Reset status on the resource."""
        context = req.environ['cinder.context']
        self.authorize(context, 'reset_status')
        update = self.validate_update(body['os-reset_status'])
        msg = _("Updating %(resource)s '%(id)s' with '%(update)r'")
        LOG.debug(msg, {'resource': self.resource_name, 'id': id,
                        'update': update})
        try:
            self._update(context, id, update)
        # NOTE: 'except ... as' replaces the removed 'except ..., e'
        # comma syntax, matching the 'as error' style used elsewhere.
        except exception.NotFound as e:
            raise exc.HTTPNotFound(e)
        return webob.Response(status_int=202)

    @wsgi.action('os-force_delete')
    def _force_delete(self, req, id, body):
        """Delete a resource, bypassing the check that it must be available."""
        context = req.environ['cinder.context']
        self.authorize(context, 'force_delete')
        try:
            resource = self._get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        self._delete(context, resource, force=True)
        return webob.Response(status_int=202)


class VolumeAdminController(AdminController):
    """AdminController for Volumes."""

    collection = 'volumes'
    valid_status = AdminController.valid_status.union(
        set(['attaching', 'in-use', 'detaching']))

    def _update(self, *args, **kwargs):
        db.volume_update(*args, **kwargs)

    def _get(self, *args, **kwargs):
        return self.volume_api.get(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        return self.volume_api.delete(*args, **kwargs)

    def validate_update(self, body):
        """Additionally accept an optional 'attach_status' update."""
        update = super(VolumeAdminController, self).validate_update(body)
        if 'attach_status' in body:
            if body['attach_status'] not in ('detached', 'attached'):
                raise exc.HTTPBadRequest("Must specify a valid attach_status")
            update['attach_status'] = body['attach_status']
        return update

    @wsgi.action('os-force_detach')
    def _force_detach(self, req, id, body):
        """Roll back a bad detach after the volume has been disconnected
        from the hypervisor.
        """
        context = req.environ['cinder.context']
        self.authorize(context, 'force_detach')
        try:
            volume = self._get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        self.volume_api.terminate_connection(context, volume,
                                             {}, force=True)
        self.volume_api.detach(context, volume)
        return webob.Response(status_int=202)


class SnapshotAdminController(AdminController):
    """AdminController for Snapshots."""

    collection = 'snapshots'

    def _update(self, *args, **kwargs):
        db.snapshot_update(*args, **kwargs)

    def _get(self, *args, **kwargs):
        return self.volume_api.get_snapshot(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        return self.volume_api.delete_snapshot(*args, **kwargs)


class Admin_actions(extensions.ExtensionDescriptor):
    """Enable admin actions."""

    name = "AdminActions"
    alias = "os-admin-actions"
    namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1"
    updated = "2012-08-25T00:00:00+00:00"

    def get_controller_extensions(self):
        """Register both admin controllers as controller extensions."""
        exts = []
        for class_ in (VolumeAdminController, SnapshotAdminController):
            controller = class_()
            extension = extensions.ControllerExtension(
                self, class_.collection, controller)
            exts.append(extension)
        return exts
def make_backup(elem):
    """Attach the standard backup attributes to an XML template element."""
    for attr in ('id', 'status', 'size', 'container', 'volume_id',
                 'object_count', 'availability_zone', 'created_at',
                 'name', 'description', 'fail_reason'):
        elem.set(attr)


def make_backup_restore(elem):
    """Attach the backup-restore attributes to an XML template element."""
    for attr in ('backup_id', 'volume_id'):
        elem.set(attr)
class BackupRestoreTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('restore', selector='restore')
        make_backup_restore(root)
        alias = Backups.alias
        namespace = Backups.namespace
        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})


class CreateDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize an XML backup-create request body."""

    def default(self, string):
        dom = minidom.parseString(string)
        return {'body': {'backup': self._extract_backup(dom)}}

    def _extract_backup(self, node):
        """Copy the recognized attributes off the <backup> element."""
        backup_node = self.find_first_child_named(node, 'backup')
        backup = {}
        for attr in ('container', 'display_name',
                     'display_description', 'volume_id'):
            if backup_node.getAttribute(attr):
                backup[attr] = backup_node.getAttribute(attr)
        return backup


class RestoreDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize an XML backup-restore request body."""

    def default(self, string):
        dom = minidom.parseString(string)
        return {'body': {'restore': self._extract_restore(dom)}}

    def _extract_restore(self, node):
        restore_node = self.find_first_child_named(node, 'restore')
        restore = {}
        if restore_node.getAttribute('volume_id'):
            restore['volume_id'] = restore_node.getAttribute('volume_id')
        return restore


class BackupsController(wsgi.Controller):
    """The Backups API controller for the OpenStack API."""

    _view_builder_class = backup_views.ViewBuilder

    def __init__(self):
        self.backup_api = backupAPI.API()
        super(BackupsController, self).__init__()

    @wsgi.serializers(xml=BackupTemplate)
    def show(self, req, id):
        """Return data about the given backup."""
        LOG.debug(_('show called for member %s'), id)
        context = req.environ['cinder.context']
        try:
            backup = self.backup_api.get(context, backup_id=id)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        return self._view_builder.detail(req, backup)

    def delete(self, req, id):
        """Delete a backup."""
        LOG.debug(_('delete called for member %s'), id)
        context = req.environ['cinder.context']
        LOG.audit(_('Delete backup with id: %s'), id, context=context)
        try:
            self.backup_api.delete(context, id)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=BackupsTemplate)
    def index(self, req):
        """Returns a summary list of backups."""
        return self._get_backups(req, is_detail=False)

    @wsgi.serializers(xml=BackupsTemplate)
    def detail(self, req):
        """Returns a detailed list of backups."""
        return self._get_backups(req, is_detail=True)

    def _get_backups(self, req, is_detail):
        """Returns a list of backups, transformed through view builder."""
        context = req.environ['cinder.context']
        limited_list = common.limited(self.backup_api.get_all(context), req)
        if is_detail:
            return self._view_builder.detail_list(req, limited_list)
        return self._view_builder.summary_list(req, limited_list)

    # TODO(frankm): Add some checks here including
    # - whether requested volume_id exists so we can return some errors
    #   immediately
    # - maybe also do validation of swift container name
    @wsgi.response(202)
    @wsgi.serializers(xml=BackupTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Create a new backup."""
        LOG.debug(_('Creating new backup %s'), body)
        if not self.is_valid_body(body, 'backup'):
            raise exc.HTTPBadRequest()
        context = req.environ['cinder.context']
        try:
            backup = body['backup']
            volume_id = backup['volume_id']
        except KeyError:
            raise exc.HTTPBadRequest(
                explanation=_("Incorrect request body format"))
        container = backup.get('container', None)
        name = backup.get('name', None)
        description = backup.get('description', None)
        # NOTE: 'volume_id' and 'container' local names feed locals() below.
        LOG.audit(_("Creating backup of volume %(volume_id)s in container"
                    " %(container)s"), locals(), context=context)
        try:
            new_backup = self.backup_api.create(context, name, description,
                                                volume_id, container)
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.VolumeNotFound as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        return self._view_builder.summary(req, dict(new_backup.iteritems()))

    @wsgi.response(202)
    @wsgi.serializers(xml=BackupRestoreTemplate)
    @wsgi.deserializers(xml=RestoreDeserializer)
    def restore(self, req, id, body):
        """Restore an existing backup to a volume."""
        backup_id = id
        LOG.debug(_('Restoring backup %(backup_id)s (%(body)s)') % locals())
        if not self.is_valid_body(body, 'restore'):
            raise exc.HTTPBadRequest()
        context = req.environ['cinder.context']
        try:
            restore = body['restore']
        except KeyError:
            raise exc.HTTPBadRequest(
                explanation=_("Incorrect request body format"))
        volume_id = restore.get('volume_id', None)
        LOG.audit(_("Restoring backup %(backup_id)s to volume %(volume_id)s"),
                  locals(), context=context)
        try:
            new_restore = self.backup_api.restore(context,
                                                  backup_id=backup_id,
                                                  volume_id=volume_id)
        # All three invalid-* exceptions map to 400, exactly as the
        # separate handlers in the original did.
        except (exception.InvalidInput,
                exception.InvalidVolume,
                exception.InvalidBackup) as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except (exception.BackupNotFound,
                exception.VolumeNotFound) as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        except (exception.VolumeSizeExceedsAvailableQuota,
                exception.VolumeLimitExceeded) as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.message, headers={'Retry-After': 0})
        return self._view_builder.restore_summary(
            req, dict(new_restore.iteritems()))


class Backups(extensions.ExtensionDescriptor):
    """Backups support."""

    name = 'Backups'
    alias = 'backups'
    namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1'
    updated = '2012-12-12T00:00:00+00:00'

    def get_resources(self):
        res = extensions.ResourceExtension(
            Backups.alias, BackupsController(),
            collection_actions={'detail': 'GET'},
            member_actions={'restore': 'POST'})
        return [res]
class ExtendedSnapshotAttributesController(wsgi.Controller):
    """Adds project_id and progress attributes to snapshot responses."""

    def __init__(self, *args, **kwargs):
        super(ExtendedSnapshotAttributesController, self).__init__(*args,
                                                                   **kwargs)
        self.volume_api = volume.API()

    def _get_snapshots(self, context):
        """Return a dict mapping snapshot id to snapshot."""
        snapshots = self.volume_api.get_all_snapshots(context)
        return dict((snap['id'], snap) for snap in snapshots)

    def _extend_snapshot(self, context, snapshot, data):
        """Copy the extended attributes from *data* onto *snapshot*."""
        for attr in ['project_id', 'progress']:
            key = "%s:%s" % (Extended_snapshot_attributes.alias, attr)
            snapshot[key] = data[attr]

    @wsgi.extends
    def show(self, req, resp_obj, id):
        context = req.environ['cinder.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ExtendedSnapshotAttributeTemplate())
            try:
                snapshot = self.volume_api.get_snapshot(context, id)
            except exception.NotFound:
                raise exc.HTTPNotFound(explanation=_("Snapshot not found."))
            self._extend_snapshot(context, resp_obj.obj['snapshot'], snapshot)

    @wsgi.extends
    def detail(self, req, resp_obj):
        context = req.environ['cinder.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ExtendedSnapshotAttributesTemplate())
            db_snapshots = self._get_snapshots(context)
            for snapshot_object in list(resp_obj.obj.get('snapshots', [])):
                try:
                    snapshot_data = db_snapshots[snapshot_object['id']]
                except KeyError:
                    # Snapshot disappeared between listing and lookup.
                    continue
                self._extend_snapshot(context, snapshot_object,
                                      snapshot_data)


class Extended_snapshot_attributes(extensions.ExtensionDescriptor):
    """Extended SnapshotAttributes support."""

    name = "ExtendedSnapshotAttributes"
    alias = "os-extended-snapshot-attributes"
    namespace = ("http://docs.openstack.org/volume/ext/"
                 "extended_snapshot_attributes/api/v1")
    updated = "2012-06-19T00:00:00+00:00"

    def get_controller_extensions(self):
        controller = ExtendedSnapshotAttributesController()
        return [extensions.ControllerExtension(self, 'snapshots',
                                               controller)]


def make_snapshot(elem):
    """Register the extended attributes on an XML template element."""
    for attr in ('project_id', 'progress'):
        elem.set('{%s}%s' % (Extended_snapshot_attributes.namespace, attr),
                 '%s:%s' % (Extended_snapshot_attributes.alias, attr))


class ExtendedSnapshotAttributeTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('snapshot', selector='snapshot')
        make_snapshot(root)
        alias = Extended_snapshot_attributes.alias
        namespace = Extended_snapshot_attributes.namespace
        return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})


class ExtendedSnapshotAttributesTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('snapshots')
        elem = xmlutil.SubTemplateElement(root, 'snapshot',
                                          selector='snapshots')
        make_snapshot(elem)
        alias = Extended_snapshot_attributes.alias
        namespace = Extended_snapshot_attributes.namespace
        return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class HostIndexTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('hosts')
        elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
        for attr in ('service-status', 'service', 'zone', 'service-state',
                     'host_name', 'last-update'):
            elem.set(attr)
        return xmlutil.MasterTemplate(root, 1)


class HostUpdateTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('host')
        root.set('host')
        root.set('status')
        return xmlutil.MasterTemplate(root, 1)


class HostActionTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('host')
        root.set('host')
        return xmlutil.MasterTemplate(root, 1)


class HostShowTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('host')
        root.append(xmlutil.make_flat_dict('resource', selector='host',
                                           subselector='resource'))
        return xmlutil.MasterTemplate(root, 1)


class HostDeserializer(wsgi.XMLDeserializer):
    def default(self, string):
        """Parse <host><key>value</key>...</host> into a flat update dict."""
        try:
            node = utils.safe_minidom_parse_string(string)
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)
        updates = {}
        for child in node.childNodes[0].childNodes:
            updates[child.tagName] = self.extract_text(child)
        return dict(body=updates)


def _list_hosts(req, service=None):
    """Returns a summary list of hosts."""
    curr_time = timeutils.utcnow()
    context = req.environ['cinder.context']
    services = db.service_get_all(context, False)
    zone = req.GET['zone'] if 'zone' in req.GET else ''
    if zone:
        services = [s for s in services if s['availability_zone'] == zone]
    hosts = []
    for host in services:
        # A service is "available" if it has reported in recently enough.
        delta = curr_time - (host['updated_at'] or host['created_at'])
        alive = abs(utils.total_seconds(delta)) <= FLAGS.service_down_time
        status = "available" if alive else "unavailable"
        active = 'disabled' if host['disabled'] else 'enabled'
        LOG.debug('status, active and update: %s, %s, %s' %
                  (status, active, host['updated_at']))
        hosts.append({'host_name': host['host'],
                      'service': host['topic'],
                      'zone': host['availability_zone'],
                      'service-status': status,
                      'service-state': active,
                      'last-update': host['updated_at']})
    if service:
        hosts = [host for host in hosts
                 if host["service"] == service]
    return hosts
class HostController(object):
    """The Hosts API controller for the OpenStack API."""
    def __init__(self):
        self.api = volume_api.HostAPI()
        super(HostController, self).__init__()

    @wsgi.serializers(xml=HostIndexTemplate)
    def index(self, req):
        """Return a summary list of all known volume-service hosts."""
        authorize(req.environ['cinder.context'])
        return {'hosts': _list_hosts(req)}

    @wsgi.serializers(xml=HostUpdateTemplate)
    @wsgi.deserializers(xml=HostDeserializer)
    @check_host
    def update(self, req, id, body):
        """Update a host's state; only the 'status' setting is supported."""
        authorize(req.environ['cinder.context'])
        update_values = {}
        for raw_key, raw_val in body.iteritems():
            key = raw_key.lower().strip()
            val = raw_val.lower().strip()
            if key == "status":
                if val in ("enable", "disable"):
                    update_values['status'] = val.startswith("enable")
                else:
                    explanation = _("Invalid status: '%s'") % raw_val
                    raise webob.exc.HTTPBadRequest(explanation=explanation)
            else:
                explanation = _("Invalid update setting: '%s'") % raw_key
                raise webob.exc.HTTPBadRequest(explanation=explanation)
        update_setters = {'status': self._set_enabled_status}
        result = {}
        for key, value in update_values.iteritems():
            result.update(update_setters[key](req, id, value))
        return result

    def _set_enabled_status(self, req, host, enabled):
        """Sets the specified host's ability to accept new volumes."""
        context = req.environ['cinder.context']
        state = "enabled" if enabled else "disabled"
        LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
        result = self.api.set_host_enabled(context,
                                           host=host,
                                           enabled=enabled)
        if result not in ("enabled", "disabled"):
            # An error message was returned
            raise webob.exc.HTTPBadRequest(explanation=result)
        return {"host": host, "status": result}

    @wsgi.serializers(xml=HostShowTemplate)
    def show(self, req, id):
        """Shows the volume usage info given by hosts.

        :param req: the incoming request
        :param id: the hostname to describe
        :returns: expected to use HostShowTemplate.
            ex.::

                {'host': {'resource':D},..}
                D: {'host': 'hostname','project': 'admin',
                    'volume_count': 1, 'total_volume_gb': 2048}
        """
        host = id
        context = req.environ['cinder.context']
        if not context.is_admin:
            msg = _("Describe-resource is admin only functionality")
            raise webob.exc.HTTPForbidden(explanation=msg)

        try:
            host_ref = db.service_get_by_host_and_topic(context,
                                                        host,
                                                        FLAGS.volume_topic)
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound(explanation=_("Host not found"))

        # Getting total available/used resource
        # TODO(jdg): Add summary info for Snapshots
        volume_refs = db.volume_get_all_by_host(context, host_ref['host'])
        # NOTE: renamed from (count, sum) -- 'sum' shadowed the builtin.
        (vol_count, vol_sum) = db.volume_data_get_for_host(context,
                                                           host_ref['host'])

        snap_count_total = 0
        snap_sum_total = 0
        resources = [{'resource': {'host': host, 'project': '(total)',
                                   'volume_count': str(vol_count),
                                   'total_volume_gb': str(vol_sum),
                                   'snapshot_count': str(snap_count_total),
                                   'total_snapshot_gb': str(snap_sum_total)}}]

        project_ids = list(set(v['project_id'] for v in volume_refs))
        for project_id in project_ids:
            (vol_count, vol_sum) = db.volume_data_get_for_project(
                context, project_id)
            (snap_count, snap_sum) = db.snapshot_data_get_for_project(
                context, project_id)
            resources.append(
                {'resource':
                 {'host': host,
                  'project': project_id,
                  'volume_count': str(vol_count),
                  'total_volume_gb': str(vol_sum),
                  'snapshot_count': str(snap_count),
                  'total_snapshot_gb': str(snap_sum)}})
            snap_count_total += int(snap_count)
            snap_sum_total += int(snap_sum)
        # Back-fill the '(total)' row now that per-project sums are known.
        resources[0]['resource']['snapshot_count'] = str(snap_count_total)
        resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total)
        return {"host": resources}


class Hosts(extensions.ExtensionDescriptor):
    """Admin-only host administration."""

    name = "Hosts"
    alias = "os-hosts"
    namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1"
    updated = "2011-06-29T00:00:00+00:00"

    def get_resources(self):
        resources = [extensions.ResourceExtension('os-hosts',
                                                  HostController(),
                                                  collection_actions={
                                                      'update': 'PUT'},
                                                  member_actions={
                                                      'startup': 'GET',
                                                      'shutdown': 'GET',
                                                      'reboot': 'GET'})]
        return resources


class Image_create(extensions.ExtensionDescriptor):
    """Allow creating a volume from an image in the Create Volume v1 API."""

    name = "CreateVolumeExtension"
    alias = "os-image-create"
    namespace = "http://docs.openstack.org/volume/ext/image-create/api/v1"
    updated = "2012-08-13T00:00:00+00:00"
class QuotaClassTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('quota_class_set',
                                       selector='quota_class_set')
        root.set('id')

        # One sub-element per known quota resource.
        for resource in QUOTAS.resources:
            elem = xmlutil.SubTemplateElement(root, resource)
            elem.text = resource

        return xmlutil.MasterTemplate(root, 1)


class QuotaClassSetsController(object):

    def _format_quota_set(self, quota_class, quota_set):
        """Convert the quota object to a result dict."""

        result = dict(id=str(quota_class))

        for resource in QUOTAS.resources:
            result[resource] = quota_set[resource]

        return dict(quota_class_set=result)

    @wsgi.serializers(xml=QuotaClassTemplate)
    def show(self, req, id):
        """Show the quota limits for the given quota class."""
        context = req.environ['cinder.context']
        authorize(context)
        try:
            db.sqlalchemy.api.authorize_quota_class_context(context, id)
        except exception.NotAuthorized:
            raise webob.exc.HTTPForbidden()

        return self._format_quota_set(id,
                                      QUOTAS.get_class_quotas(context, id))

    @wsgi.serializers(xml=QuotaClassTemplate)
    def update(self, req, id, body):
        """Update quota limits for the given quota class.

        Unknown keys in the body are ignored; non-integer limits are
        rejected with a 400.
        """
        context = req.environ['cinder.context']
        authorize(context)
        quota_class = id
        for key in body['quota_class_set'].keys():
            if key in QUOTAS:
                try:
                    value = int(body['quota_class_set'][key])
                except (ValueError, TypeError):
                    # Reject non-integer limits with a 400 instead of
                    # letting the int() conversion bubble up as a 500.
                    msg = _("Quota class limit must be an integer.")
                    raise webob.exc.HTTPBadRequest(explanation=msg)
                try:
                    db.quota_class_update(context, quota_class, key, value)
                except exception.QuotaClassNotFound:
                    db.quota_class_create(context, quota_class, key, value)
                except exception.AdminRequired:
                    raise webob.exc.HTTPForbidden()
        return {'quota_class_set': QUOTAS.get_class_quotas(context,
                                                           quota_class)}


class Quota_classes(extensions.ExtensionDescriptor):
    """Quota classes management support."""

    name = "QuotaClasses"
    alias = "os-quota-class-sets"
    namespace = ("http://docs.openstack.org/volume/ext/"
                 "quota-classes-sets/api/v1.1")
    updated = "2012-03-12T00:00:00+00:00"

    def get_resources(self):
        resources = []

        res = extensions.ResourceExtension('os-quota-class-sets',
                                           QuotaClassSetsController())
        resources.append(res)

        return resources
+ +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder.db.sqlalchemy import api as sqlalchemy_api +from cinder import exception +from cinder import quota + + +QUOTAS = quota.QUOTAS + + +authorize_update = extensions.extension_authorizer('compute', 'quotas:update') +authorize_show = extensions.extension_authorizer('compute', 'quotas:show') + + +class QuotaTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('quota_set', selector='quota_set') + root.set('id') + + for resource in QUOTAS.resources: + elem = xmlutil.SubTemplateElement(root, resource) + elem.text = resource + + return xmlutil.MasterTemplate(root, 1) + + +class QuotaSetsController(object): + + def _format_quota_set(self, project_id, quota_set): + """Convert the quota object to a result dict""" + + result = dict(id=str(project_id)) + + for resource in QUOTAS.resources: + result[resource] = quota_set[resource] + + return dict(quota_set=result) + + def _validate_quota_limit(self, limit): + # NOTE: -1 is a flag value for unlimited + if limit < -1: + msg = _("Quota limit must be -1 or greater.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + def _get_quotas(self, context, id, usages=False): + values = QUOTAS.get_project_quotas(context, id, usages=usages) + + if usages: + return values + else: + return dict((k, v['limit']) for k, v in values.items()) + + @wsgi.serializers(xml=QuotaTemplate) + def show(self, req, id): + context = req.environ['cinder.context'] + authorize_show(context) + try: + sqlalchemy_api.authorize_project_context(context, id) + except exception.NotAuthorized: + raise webob.exc.HTTPForbidden() + + return self._format_quota_set(id, self._get_quotas(context, id)) + + @wsgi.serializers(xml=QuotaTemplate) + def update(self, req, id, body): + context = req.environ['cinder.context'] + authorize_update(context) + project_id = id + for key in 
body['quota_set'].keys(): + if key in QUOTAS: + value = int(body['quota_set'][key]) + self._validate_quota_limit(value) + try: + db.quota_update(context, project_id, key, value) + except exception.ProjectQuotaNotFound: + db.quota_create(context, project_id, key, value) + except exception.AdminRequired: + raise webob.exc.HTTPForbidden() + return {'quota_set': self._get_quotas(context, id)} + + @wsgi.serializers(xml=QuotaTemplate) + def defaults(self, req, id): + context = req.environ['cinder.context'] + authorize_show(context) + return self._format_quota_set(id, QUOTAS.get_defaults(context)) + + +class Quotas(extensions.ExtensionDescriptor): + """Quotas management support""" + + name = "Quotas" + alias = "os-quota-sets" + namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1" + updated = "2011-08-08T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-quota-sets', + QuotaSetsController(), + member_actions={'defaults': 'GET'}) + resources.append(res) + + return resources diff --git a/cinder/api/contrib/services.py b/cinder/api/contrib/services.py new file mode 100644 index 0000000000..3efac96f63 --- /dev/null +++ b/cinder/api/contrib/services.py @@ -0,0 +1,139 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import webob.exc + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import utils + + +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('volume', 'services') + + +class ServicesIndexTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('services') + elem = xmlutil.SubTemplateElement(root, 'service', selector='services') + elem.set('binary') + elem.set('host') + elem.set('zone') + elem.set('status') + elem.set('state') + elem.set('update_at') + + return xmlutil.MasterTemplate(root, 1) + + +class ServicesUpdateTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + root.set('host') + root.set('service') + root.set('disabled') + + return xmlutil.MasterTemplate(root, 1) + + +class ServiceController(object): + @wsgi.serializers(xml=ServicesIndexTemplate) + def index(self, req): + """ + Return a list of all running services. Filter by host & service name. 
+ """ + context = req.environ['cinder.context'] + authorize(context) + now = timeutils.utcnow() + services = db.service_get_all(context) + + host = '' + if 'host' in req.GET: + host = req.GET['host'] + service = '' + if 'service' in req.GET: + service = req.GET['service'] + if host: + services = [s for s in services if s['host'] == host] + if service: + services = [s for s in services if s['binary'] == service] + + svcs = [] + for svc in services: + delta = now - (svc['updated_at'] or svc['created_at']) + alive = abs(utils.total_seconds(delta)) + art = (alive and "up") or "down" + active = 'enabled' + if svc['disabled']: + active = 'disabled' + svcs.append({"binary": svc['binary'], 'host': svc['host'], + 'zone': svc['availability_zone'], + 'status': active, 'state': art, + 'updated_at': svc['updated_at']}) + return {'services': svcs} + + @wsgi.serializers(xml=ServicesUpdateTemplate) + def update(self, req, id, body): + """Enable/Disable scheduling for a service""" + context = req.environ['cinder.context'] + authorize(context) + + if id == "enable": + disabled = False + elif id == "disable": + disabled = True + else: + raise webob.exc.HTTPNotFound("Unknown action") + + try: + host = body['host'] + service = body['service'] + except (TypeError, KeyError): + raise webob.exc.HTTPBadRequest() + + try: + svc = db.service_get_by_args(context, host, service) + if not svc: + raise webob.exc.HTTPNotFound('Unknown service') + + db.service_update(context, svc['id'], {'disabled': disabled}) + except exception.ServiceNotFound: + raise webob.exc.HTTPNotFound("service not found") + + return {'host': host, 'service': service, 'disabled': disabled} + + +class Services(extensions.ExtensionDescriptor): + """Services support""" + + name = "Services" + alias = "os-services" + namespace = "http://docs.openstack.org/volume/ext/services/api/v2" + updated = "2012-10-28T00:00:00-00:00" + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('os-services', + 
ServiceController()) + resources.append(resource) + return resources diff --git a/cinder/api/contrib/share_actions.py b/cinder/api/contrib/share_actions.py new file mode 100644 index 0000000000..33040e722a --- /dev/null +++ b/cinder/api/contrib/share_actions.py @@ -0,0 +1,80 @@ +# Copyright 2013 NetApp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder import exception +from cinder import share + + +class ShareActionsController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(ShareActionsController, self).__init__(*args, **kwargs) + self.share_api = share.API() + + @wsgi.action('os-allow_access') + def _allow_access(self, req, id, body): + """Add share access rule.""" + context = req.environ['cinder.context'] + + share = self.share_api.get(context, id) + + access_type = body['os-allow_access']['access_type'] + access_to = body['os-allow_access']['access_to'] + + self.share_api.allow_access(context, share, access_type, access_to) + return webob.Response(status_int=202) + + @wsgi.action('os-deny_access') + def _deny_access(self, req, id, body): + """Remove access rule.""" + context = req.environ['cinder.context'] + + access_id = body['os-deny_access']['access_id'] + + try: + access = self.share_api.access_get(context, access_id) + if access.share_id != id: + raise exception.NotFound() + share = self.share_api.get(context, id) + except exception.NotFound, 
error: + raise webob.exc.HTTPNotFound(explanation=unicode(error)) + self.share_api.deny_access(context, share, access) + return webob.Response(status_int=202) + + @wsgi.action('os-access_list') + def _access_list(self, req, id, body): + """list access rules.""" + context = req.environ['cinder.context'] + + share = self.share_api.get(context, id) + access_list = self.share_api.access_get_all(context, share) + return {'access_list': access_list} + + +class Share_actions(extensions.ExtensionDescriptor): + """Enable share actions.""" + + name = 'ShareActions' + alias = 'share-actions' + namespace = '' + updated = '2012-08-14T00:00:00+00:00' + + def get_controller_extensions(self): + controller = ShareActionsController() + extension = extensions.ControllerExtension(self, 'shares', + controller) + return [extension] diff --git a/cinder/api/contrib/share_snapshots.py b/cinder/api/contrib/share_snapshots.py new file mode 100644 index 0000000000..56c4affff2 --- /dev/null +++ b/cinder/api/contrib/share_snapshots.py @@ -0,0 +1,181 @@ +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The share snapshots api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api.contrib import shares +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import share_snapshots as snapshot_views +from cinder.api import xmlutil +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import share + + +LOG = logging.getLogger(__name__) + + +def make_snapshot(elem): + elem.set('id') + elem.set('size') + elem.set('status') + elem.set('name') + elem.set('description') + elem.set('share_proto') + elem.set('export_location') + + +class SnapshotTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('share-snapshot', + selector='share-snapshot') + make_snapshot(root) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('share-snapshots') + elem = xmlutil.SubTemplateElement(root, 'share-snapshot', + selector='share-snapshots') + make_snapshot(elem) + return xmlutil.MasterTemplate(root, 1) + + +class ShareSnapshotsController(wsgi.Controller): + """The Share Snapshots API controller for the OpenStack API.""" + + _view_builder_class = snapshot_views.ViewBuilder + + def __init__(self): + super(ShareSnapshotsController, self).__init__() + self.share_api = share.API() + + @wsgi.serializers(xml=SnapshotTemplate) + def show(self, req, id): + """Return data about the given snapshot.""" + context = req.environ['cinder.context'] + + try: + snapshot = self.share_api.get_snapshot(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return self._view_builder.detail(req, snapshot) + + def delete(self, req, id): + """Delete a snapshot.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete snapshot with id: %s"), id, context=context) + + try: + snapshot = self.share_api.get_snapshot(context, id) + 
self.share_api.delete_snapshot(context, snapshot) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SnapshotsTemplate) + def index(self, req): + """Returns a summary list of snapshots.""" + return self._get_snapshots(req, is_detail=False) + + @wsgi.serializers(xml=SnapshotsTemplate) + def detail(self, req): + """Returns a detailed list of snapshots.""" + return self._get_snapshots(req, is_detail=True) + + def _get_snapshots(self, req, is_detail): + """Returns a list of snapshots.""" + context = req.environ['cinder.context'] + + search_opts = {} + search_opts.update(req.GET) + + # NOTE(rushiagr): v2 API allows name instead of display_name + if 'name' in search_opts: + search_opts['display_name'] = search_opts['name'] + del search_opts['name'] + + shares.remove_invalid_options(context, search_opts, + self._get_snapshots_search_options()) + + snapshots = self.share_api.get_all_snapshots(context, + search_opts=search_opts) + limited_list = common.limited(snapshots, req) + if is_detail: + snapshots = self._view_builder.detail_list(req, limited_list) + else: + snapshots = self._view_builder.summary_list(req, limited_list) + return snapshots + + def _get_snapshots_search_options(self): + """Return share search options allowed by non-admin.""" + return ('name', 'status', 'share_id') + + @wsgi.response(202) + @wsgi.serializers(xml=SnapshotTemplate) + def create(self, req, body): + """Creates a new snapshot.""" + context = req.environ['cinder.context'] + + if not self.is_valid_body(body, 'share-snapshot'): + raise exc.HTTPUnprocessableEntity() + + snapshot = body['share-snapshot'] + + share_id = snapshot['share_id'] + share = self.share_api.get(context, share_id) + msg = _("Create snapshot from share %s") + LOG.audit(msg, share_id, context=context) + + # NOTE(rushiagr): v2 API allows name instead of display_name + if 'name' in snapshot: + snapshot['display_name'] = snapshot.get('name') + del 
snapshot['name'] + + # NOTE(rushiagr): v2 API allows description instead of + # display_description + if 'description' in snapshot: + snapshot['display_description'] = snapshot.get('description') + del snapshot['description'] + + new_snapshot = self.share_api.create_snapshot( + context, + share, + snapshot.get('display_name'), + snapshot.get('display_description')) + return self._view_builder.summary(req, dict(new_snapshot.iteritems())) + + +class Share_snapshots(extensions.ExtensionDescriptor): + """Enable share snapshots API.""" + name = 'ShareSnapshots' + alias = 'share-snapshots' + namespace = '' + updated = '2013-03-01T00:00:00+00:00' + + def get_resources(self): + controller = ShareSnapshotsController() + resource = extensions.ResourceExtension( + 'share-snapshots', controller, + collection_actions={'detail': 'GET'}) + return [resource] diff --git a/cinder/api/contrib/shares.py b/cinder/api/contrib/shares.py new file mode 100644 index 0000000000..104087ce1a --- /dev/null +++ b/cinder/api/contrib/shares.py @@ -0,0 +1,215 @@ +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The shares api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import shares as share_views +from cinder.api import xmlutil +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import share + + +LOG = logging.getLogger(__name__) + + +def make_share(elem): + elem.set('id') + elem.set('size') + elem.set('availability_zone') + elem.set('status') + elem.set('name') + elem.set('description') + elem.set('share_proto') + elem.set('export_location') + + +def remove_invalid_options(context, search_options, allowed_search_options): + """Remove search options that are not valid for non-admin API/context.""" + if context.is_admin: + # Allow all options + return + # Otherwise, strip out all unknown options + unknown_options = [opt for opt in search_options + if opt not in allowed_search_options] + bad_options = ", ".join(unknown_options) + log_msg = _("Removing options '%(bad_options)s' from query") % locals() + LOG.debug(log_msg) + for opt in unknown_options: + del search_options[opt] + + +class ShareTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('share', selector='share') + make_share(root) + return xmlutil.MasterTemplate(root, 1) + + +class SharesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('shares') + elem = xmlutil.SubTemplateElement(root, 'share', selector='shares') + make_share(elem) + return xmlutil.MasterTemplate(root, 1) + + +class ShareController(wsgi.Controller): + """The Shares API controller for the OpenStack API.""" + + _view_builder_class = share_views.ViewBuilder + + def __init__(self): + super(ShareController, self).__init__() + self.share_api = share.API() + + @wsgi.serializers(xml=ShareTemplate) + def show(self, req, id): + """Return data about the given share.""" + context = 
req.environ['cinder.context'] + + try: + share = self.share_api.get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return self._view_builder.detail(req, share) + + def delete(self, req, id): + """Delete a share.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete share with id: %s"), id, context=context) + + try: + share = self.share_api.get(context, id) + self.share_api.delete(context, share) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SharesTemplate) + def index(self, req): + """Returns a summary list of shares.""" + return self._get_shares(req, is_detail=False) + + @wsgi.serializers(xml=SharesTemplate) + def detail(self, req): + """Returns a detailed list of shares.""" + return self._get_shares(req, is_detail=True) + + def _get_shares(self, req, is_detail): + """Returns a list of shares, transformed through view + builder. + """ + context = req.environ['cinder.context'] + + search_opts = {} + search_opts.update(req.GET) + + # NOTE(rushiagr): v2 API allows name instead of display_name + if 'name' in search_opts: + search_opts['display_name'] = search_opts['name'] + del search_opts['name'] + + remove_invalid_options(context, search_opts, + self._get_share_search_options()) + + shares = self.share_api.get_all(context, search_opts=search_opts) + + limited_list = common.limited(shares, req) + + if is_detail: + shares = self._view_builder.detail_list(req, limited_list) + else: + shares = self._view_builder.summary_list(req, limited_list) + return shares + + def _get_share_search_options(self): + """Return share search options allowed by non-admin.""" + return ('name', 'status') + + @wsgi.serializers(xml=ShareTemplate) + def create(self, req, body): + """Creates a new share.""" + context = req.environ['cinder.context'] + + if not self.is_valid_body(body, 'share'): + raise exc.HTTPUnprocessableEntity() + + share = body['share'] + + # NOTE(rushiagr): 
v2 API allows name instead of display_name + if share.get('name'): + share['display_name'] = share.get('name') + del share['name'] + + # NOTE(rushiagr): v2 API allows description instead of + # display_description + if share.get('description'): + share['display_description'] = share.get('description') + del share['description'] + + size = share['size'] + share_proto = share['share_proto'].upper() + + msg = (_("Create %(share_proto)s share of %(size)s GB") % + {'share_proto': share_proto, 'size': size}) + LOG.audit(msg, context=context) + + kwargs = {} + kwargs['availability_zone'] = share.get('availability_zone') + + snapshot_id = share.get('snapshot_id') + if snapshot_id is not None: + kwargs['snapshot'] = self.share_api.get_snapshot(context, + snapshot_id) + else: + kwargs['snapshot'] = None + + display_name = share.get('display_name') + display_description = share.get('display_description') + new_share = self.share_api.create(context, + share_proto, + size, + display_name, + display_description, + **kwargs) + + # TODO(vish): Instance should be None at db layer instead of + # trying to lazy load, but for now we turn it into + # a dict to avoid an error. + return self._view_builder.summary(req, dict(new_share.iteritems())) + + +class Shares(extensions.ExtensionDescriptor): + """Enable share API.""" + name = 'Shares' + alias = 'shares' + namespace = '' + updated = '2013-01-29T00:00:00+00:00' + + def get_resources(self): + controller = ShareController() + resource = extensions.ResourceExtension( + 'shares', controller, collection_actions={'detail': 'GET'}, + member_actions={'action': 'POST'}) + return [resource] diff --git a/cinder/api/contrib/types_extra_specs.py b/cinder/api/contrib/types_extra_specs.py new file mode 100644 index 0000000000..7377f5245d --- /dev/null +++ b/cinder/api/contrib/types_extra_specs.py @@ -0,0 +1,162 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume types extra specs extension""" + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api +from cinder.volume import volume_types + +authorize = extensions.extension_authorizer('volume', 'types_extra_specs') + + +class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder): + def construct(self): + tagname = xmlutil.Selector('key') + + def extraspec_sel(obj, do_raise=False): + # Have to extract the key and value for later use... 
+ key, value = obj.items()[0] + return dict(key=key, value=value) + + root = xmlutil.TemplateElement(tagname, selector=extraspec_sel) + root.text = 'value' + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypeExtraSpecsController(wsgi.Controller): + """ The volume type extra specs API controller for the OpenStack API """ + + def _get_extra_specs(self, context, type_id): + extra_specs = db.volume_type_extra_specs_get(context, type_id) + specs_dict = {} + for key, value in extra_specs.iteritems(): + specs_dict[key] = value + return dict(extra_specs=specs_dict) + + def _check_type(self, context, type_id): + try: + volume_types.get_volume_type(context, type_id) + except exception.NotFound as ex: + raise webob.exc.HTTPNotFound(explanation=unicode(ex)) + + @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) + def index(self, req, type_id): + """ Returns the list of extra specs for a given volume type """ + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + return self._get_extra_specs(context, type_id) + + @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) + def create(self, req, type_id, body=None): + context = req.environ['cinder.context'] + authorize(context) + + if not self.is_valid_body(body, 'extra_specs'): + raise webob.exc.HTTPBadRequest() + + self._check_type(context, type_id) + + specs = body['extra_specs'] + db.volume_type_extra_specs_update_or_create(context, + type_id, + specs) + notifier_info = dict(type_id=type_id, specs=specs) + notifier_api.notify(context, 'volumeTypeExtraSpecs', + 'volume_type_extra_specs.create', + notifier_api.INFO, notifier_info) + return body + + @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) + def update(self, req, type_id, id, body=None): + context = req.environ['cinder.context'] + authorize(context) + if not body: + expl = _('Request body empty') + raise webob.exc.HTTPBadRequest(explanation=expl) + self._check_type(context, type_id) + if id not in body: + expl = 
_('Request body and URI mismatch') + raise webob.exc.HTTPBadRequest(explanation=expl) + if len(body) > 1: + expl = _('Request body contains too many items') + raise webob.exc.HTTPBadRequest(explanation=expl) + db.volume_type_extra_specs_update_or_create(context, + type_id, + body) + notifier_info = dict(type_id=type_id, id=id) + notifier_api.notify(context, 'volumeTypeExtraSpecs', + 'volume_type_extra_specs.update', + notifier_api.INFO, notifier_info) + return body + + @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) + def show(self, req, type_id, id): + """Return a single extra spec item.""" + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + specs = self._get_extra_specs(context, type_id) + if id in specs['extra_specs']: + return {id: specs['extra_specs'][id]} + else: + raise webob.exc.HTTPNotFound() + + def delete(self, req, type_id, id): + """ Deletes an existing extra spec """ + context = req.environ['cinder.context'] + self._check_type(context, type_id) + authorize(context) + db.volume_type_extra_specs_delete(context, type_id, id) + notifier_info = dict(type_id=type_id, id=id) + notifier_api.notify(context, 'volumeTypeExtraSpecs', + 'volume_type_extra_specs.delete', + notifier_api.INFO, notifier_info) + return webob.Response(status_int=202) + + +class Types_extra_specs(extensions.ExtensionDescriptor): + """Types extra specs support""" + + name = "TypesExtraSpecs" + alias = "os-types-extra-specs" + namespace = "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1" + updated = "2011-08-24T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension('extra_specs', + VolumeTypeExtraSpecsController(), + parent=dict(member_name='type', + collection_name='types') + ) + resources.append(res) + + return resources diff --git a/cinder/api/contrib/types_manage.py b/cinder/api/contrib/types_manage.py new file mode 100644 index 0000000000..eb17e730be --- /dev/null +++ 
b/cinder/api/contrib/types_manage.py @@ -0,0 +1,122 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume types manage extension.""" + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.v1 import types +from cinder.api.views import types as views_types +from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api +from cinder.volume import volume_types + + +authorize = extensions.extension_authorizer('volume', 'types_manage') + + +class VolumeTypesManageController(wsgi.Controller): + """The volume types API controller for the OpenStack API.""" + + _view_builder_class = views_types.ViewBuilder + + def _notify_voloume_type_error(self, context, method, payload): + notifier_api.notify(context, + 'volumeType', + method, + notifier_api.ERROR, + payload) + + @wsgi.action("create") + @wsgi.serializers(xml=types.VolumeTypeTemplate) + def _create(self, req, body): + """Creates a new volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + if not self.is_valid_body(body, 'volume_type'): + raise webob.exc.HTTPBadRequest() + + vol_type = body['volume_type'] + name = vol_type.get('name', None) + specs = vol_type.get('extra_specs', {}) + + if name is None or name == "": + raise webob.exc.HTTPBadRequest() + + try: + 
volume_types.create(context, name, specs) + vol_type = volume_types.get_volume_type_by_name(context, name) + notifier_info = dict(volume_types=vol_type) + notifier_api.notify(context, 'volumeType', + 'volume_type.create', + notifier_api.INFO, notifier_info) + + except exception.VolumeTypeExists as err: + notifier_err = dict(volume_types=vol_type, error_message=str(err)) + self._notify_voloume_type_error(context, + 'volume_type.create', + notifier_err) + + raise webob.exc.HTTPConflict(explanation=str(err)) + except exception.NotFound as err: + notifier_err = dict(volume_types=vol_type, error_message=str(err)) + self._notify_voloume_type_error(context, + 'volume_type.create', + notifier_err) + raise webob.exc.HTTPNotFound() + + return self._view_builder.show(req, vol_type) + + @wsgi.action("delete") + def _delete(self, req, id): + """Deletes an existing volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + try: + vol_type = volume_types.get_volume_type(context, id) + volume_types.destroy(context, vol_type['id']) + notifier_info = dict(volume_types=vol_type) + notifier_api.notify(context, 'volumeType', + 'volume_type.delete', + notifier_api.INFO, notifier_info) + except exception.NotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_voloume_type_error(context, + 'volume_type.delete', + notifier_err) + + raise webob.exc.HTTPNotFound() + + return webob.Response(status_int=202) + + +class Types_manage(extensions.ExtensionDescriptor): + """Types manage support.""" + + name = "TypesManage" + alias = "os-types-manage" + namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1" + updated = "2011-08-24T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeTypesManageController() + extension = extensions.ControllerExtension(self, 'types', controller) + return [extension] diff --git a/cinder/api/contrib/volume_actions.py b/cinder/api/contrib/volume_actions.py new file mode 100644 
# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Volume actions extension: attach/detach bookkeeping, reservation,
connection setup/teardown and upload-to-image on the core volumes
resource."""

import webob

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common.rpc import common as rpc_common
from cinder import utils
from cinder import volume


FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)


def authorize(context, action_name):
    """Enforce the policy rule 'volume_actions:<action_name>'."""
    action = 'volume_actions:%s' % action_name
    extensions.extension_authorizer('volume', action)(context)


class VolumeToImageSerializer(xmlutil.TemplateBuilder):
    """XML serializer for os-volume_upload_image responses."""

    def construct(self):
        root = xmlutil.TemplateElement('os-volume_upload_image',
                                       selector='os-volume_upload_image')
        root.set('id')
        root.set('updated_at')
        root.set('status')
        root.set('display_description')
        root.set('size')
        root.set('volume_type')
        root.set('image_id')
        root.set('container_format')
        root.set('disk_format')
        root.set('image_name')
        return xmlutil.MasterTemplate(root, 1)


class VolumeToImageDeserializer(wsgi.XMLDeserializer):
    """Deserializer to handle xml-formatted requests."""

    def default(self, string):
        dom = utils.safe_minidom_parse_string(string)
        action_node = dom.childNodes[0]
        action_name = action_node.tagName

        action_data = {}
        attributes = ["force", "image_name", "container_format",
                      "disk_format"]
        for attr in attributes:
            if action_node.hasAttribute(attr):
                action_data[attr] = action_node.getAttribute(attr)
        # XML attributes are strings; map the literal 'True' onto the
        # boolean the API layer expects.
        if 'force' in action_data and action_data['force'] == 'True':
            action_data['force'] = True
        return {'body': {action_name: action_data}}


class VolumeActionsController(wsgi.Controller):
    """Implements the os-* action verbs on /volumes/<id>/action."""

    def __init__(self, *args, **kwargs):
        super(VolumeActionsController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()

    @wsgi.action('os-attach')
    def _attach(self, req, id, body):
        """Add attachment metadata."""
        context = req.environ['cinder.context']
        # NOTE: local renamed from 'volume' to 'vol' throughout this
        # class to stop shadowing the 'cinder.volume' module import.
        vol = self.volume_api.get(context, id)

        instance_uuid = body['os-attach']['instance_uuid']
        mountpoint = body['os-attach']['mountpoint']

        self.volume_api.attach(context, vol,
                               instance_uuid, mountpoint)
        return webob.Response(status_int=202)

    @wsgi.action('os-detach')
    def _detach(self, req, id, body):
        """Clear attachment metadata."""
        context = req.environ['cinder.context']
        vol = self.volume_api.get(context, id)
        self.volume_api.detach(context, vol)
        return webob.Response(status_int=202)

    @wsgi.action('os-reserve')
    def _reserve(self, req, id, body):
        """Mark volume as reserved."""
        context = req.environ['cinder.context']
        vol = self.volume_api.get(context, id)
        self.volume_api.reserve_volume(context, vol)
        return webob.Response(status_int=202)

    @wsgi.action('os-unreserve')
    def _unreserve(self, req, id, body):
        """Unmark volume as reserved."""
        context = req.environ['cinder.context']
        vol = self.volume_api.get(context, id)
        self.volume_api.unreserve_volume(context, vol)
        return webob.Response(status_int=202)

    @wsgi.action('os-begin_detaching')
    def _begin_detaching(self, req, id, body):
        """Update volume status to 'detaching'."""
        context = req.environ['cinder.context']
        vol = self.volume_api.get(context, id)
        self.volume_api.begin_detaching(context, vol)
        return webob.Response(status_int=202)

    @wsgi.action('os-roll_detaching')
    def _roll_detaching(self, req, id, body):
        """Roll back volume status to 'in-use'."""
        context = req.environ['cinder.context']
        vol = self.volume_api.get(context, id)
        self.volume_api.roll_detaching(context, vol)
        return webob.Response(status_int=202)

    @wsgi.action('os-initialize_connection')
    def _initialize_connection(self, req, id, body):
        """Initialize volume attachment."""
        context = req.environ['cinder.context']
        vol = self.volume_api.get(context, id)
        connector = body['os-initialize_connection']['connector']
        info = self.volume_api.initialize_connection(context,
                                                     vol,
                                                     connector)
        return {'connection_info': info}

    @wsgi.action('os-terminate_connection')
    def _terminate_connection(self, req, id, body):
        """Terminate volume attachment."""
        context = req.environ['cinder.context']
        vol = self.volume_api.get(context, id)
        connector = body['os-terminate_connection']['connector']
        self.volume_api.terminate_connection(context, vol, connector)
        return webob.Response(status_int=202)

    @wsgi.response(202)
    @wsgi.action('os-volume_upload_image')
    @wsgi.serializers(xml=VolumeToImageSerializer)
    @wsgi.deserializers(xml=VolumeToImageDeserializer)
    def _volume_upload_image(self, req, id, body):
        """Uploads the specified volume to image service.

        Requires an 'image_name' in the body; 'force',
        'container_format' (default 'bare') and 'disk_format'
        (default 'raw') are optional.
        """
        context = req.environ['cinder.context']
        try:
            params = body['os-volume_upload_image']
        except (TypeError, KeyError):
            msg = _("Invalid request body")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not params.get("image_name"):
            msg = _("No image_name was specified in request.")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        force = params.get('force', False)
        # NOTE: converted the deprecated 'except X, e' clauses below to
        # the 'except X as e' form already used for RemoteError, for
        # consistency and Python 3 forward compatibility (PEP 3110).
        try:
            vol = self.volume_api.get(context, id)
        except exception.VolumeNotFound as error:
            raise webob.exc.HTTPNotFound(explanation=unicode(error))
        authorize(context, "upload_image")
        image_metadata = {"container_format": params.get("container_format",
                                                         "bare"),
                          "disk_format": params.get("disk_format", "raw"),
                          "name": params["image_name"]}
        try:
            response = self.volume_api.copy_volume_to_image(context,
                                                            vol,
                                                            image_metadata,
                                                            force)
        except exception.InvalidVolume as error:
            raise webob.exc.HTTPBadRequest(explanation=unicode(error))
        except ValueError as error:
            raise webob.exc.HTTPBadRequest(explanation=unicode(error))
        except rpc_common.RemoteError as error:
            msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
                                                 'err_msg': error.value}
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return {'os-volume_upload_image': response}


class Volume_actions(extensions.ExtensionDescriptor):
    """Enable volume actions
    """

    name = "VolumeActions"
    alias = "os-volume-actions"
    namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1"
    updated = "2012-05-31T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the controller above to the core 'volumes' resource."""
        controller = VolumeActionsController()
        extension = extensions.ControllerExtension(self, 'volumes', controller)
        return [extension]
"""Admin-facing extension that adds the backing host to volume responses."""

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder.openstack.common import log as logging
from cinder import volume


LOG = logging.getLogger(__name__)
# Soft authorizer: returns True/False instead of raising, so the
# attribute is silently omitted for unauthorized callers.
authorize = extensions.soft_extension_authorizer('volume',
                                                 'volume_host_attribute')


class VolumeHostAttributeController(wsgi.Controller):
    # Decorates core volume show/detail responses with
    # '<alias>:host' when policy allows it.

    def __init__(self, *args, **kwargs):
        super(VolumeHostAttributeController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()

    def _add_volume_host_attribute(self, context, resp_volume):
        # Best effort: any failure to re-fetch the volume simply leaves
        # the extra attribute off the response dict.
        try:
            db_volume = self.volume_api.get(context, resp_volume['id'])
        except Exception:
            return
        else:
            key = "%s:host" % Volume_host_attribute.alias
            resp_volume[key] = db_volume['host']

    @wsgi.extends
    def show(self, req, resp_obj, id):
        # Decorate a single-volume response.
        context = req.environ['cinder.context']
        if authorize(context):
            resp_obj.attach(xml=VolumeHostAttributeTemplate())
            self._add_volume_host_attribute(context, resp_obj.obj['volume'])

    @wsgi.extends
    def detail(self, req, resp_obj):
        # Decorate every entry of a detail listing.  list() takes a
        # snapshot so mutating the dicts cannot disturb the iteration.
        context = req.environ['cinder.context']
        if authorize(context):
            resp_obj.attach(xml=VolumeListHostAttributeTemplate())
            for volume in list(resp_obj.obj['volumes']):
                self._add_volume_host_attribute(context, volume)


class Volume_host_attribute(extensions.ExtensionDescriptor):
    """Expose host as an attribute of a volume."""

    name = "VolumeHostAttribute"
    alias = "os-vol-host-attr"
    namespace = ("http://docs.openstack.org/volume/ext/"
                 "volume_host_attribute/api/v1")
    updated = "2011-11-03T00:00:00+00:00"

    def get_controller_extensions(self):
        controller = VolumeHostAttributeController()
        extension = extensions.ControllerExtension(self, 'volumes', controller)
        return [extension]


def make_volume(elem):
    # Declare the namespaced host attribute on an XML volume element.
    elem.set('{%s}host' % Volume_host_attribute.namespace,
             '%s:host' % Volume_host_attribute.alias)


class VolumeHostAttributeTemplate(xmlutil.TemplateBuilder):
    # Slave XML template for a single volume.
    def construct(self):
        root = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(root)
        alias = Volume_host_attribute.alias
        namespace = Volume_host_attribute.namespace
        return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})


class VolumeListHostAttributeTemplate(xmlutil.TemplateBuilder):
    # Slave XML template for a volume listing.
    def construct(self):
        root = xmlutil.TemplateElement('volumes')
        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
        make_volume(elem)
        alias = Volume_host_attribute.alias
        namespace = Volume_host_attribute.namespace
        return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
+ +"""The Volume Image Metadata API extension.""" + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import volume + + +authorize = extensions.soft_extension_authorizer('volume', + 'volume_image_metadata') + + +class VolumeImageMetadataController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeImageMetadataController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + def _add_image_metadata(self, context, resp_volume): + try: + image_meta = self.volume_api.get_volume_image_metadata( + context, resp_volume) + except Exception: + return + else: + if image_meta: + resp_volume['volume_image_metadata'] = dict( + image_meta.iteritems()) + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeImageMetadataTemplate()) + self._add_image_metadata(context, resp_obj.obj['volume']) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumesImageMetadataTemplate()) + for volume in list(resp_obj.obj.get('volumes', [])): + self._add_image_metadata(context, volume) + + +class Volume_image_metadata(extensions.ExtensionDescriptor): + """Show image metadata associated with the volume""" + + name = "VolumeImageMetadata" + alias = "os-vol-image-meta" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_image_metadata/api/v1") + updated = "2012-12-07T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeImageMetadataController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +class VolumeImageMetadataMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_image_metadata', + selector='volume_image_metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + 
selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + + return xmlutil.MasterTemplate(root, 1) + + +class VolumeImageMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + root.append(VolumeImageMetadataMetadataTemplate()) + + alias = Volume_image_metadata.alias + namespace = Volume_image_metadata.namespace + + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class VolumesImageMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volume') + elem.append(VolumeImageMetadataMetadataTemplate()) + + alias = Volume_image_metadata.alias + namespace = Volume_image_metadata.namespace + + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/volume_tenant_attribute.py b/cinder/api/contrib/volume_tenant_attribute.py new file mode 100644 index 0000000000..620ae10065 --- /dev/null +++ b/cinder/api/contrib/volume_tenant_attribute.py @@ -0,0 +1,91 @@ +# Copyright 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
"""Extension adding the owning project id to volume API responses."""

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import volume


# Soft authorizer: yields True/False rather than raising, so the extra
# attribute is quietly dropped for callers the policy rejects.
authorize = extensions.soft_extension_authorizer('volume',
                                                 'volume_tenant_attribute')


class VolumeTenantAttributeController(wsgi.Controller):
    """Injects '<alias>:tenant_id' into volume show/detail responses."""

    def __init__(self, *args, **kwargs):
        super(VolumeTenantAttributeController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()

    def _add_volume_tenant_attribute(self, context, resp_volume):
        """Attach the tenant id to one response dict, best effort."""
        try:
            db_volume = self.volume_api.get(context, resp_volume['id'])
        except Exception:
            # Lookup failure of any kind just omits the attribute.
            return
        attr_key = "%s:tenant_id" % Volume_tenant_attribute.alias
        resp_volume[attr_key] = db_volume['project_id']

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Decorate a single-volume response."""
        context = req.environ['cinder.context']
        if not authorize(context):
            return
        resp_obj.attach(xml=VolumeTenantAttributeTemplate())
        self._add_volume_tenant_attribute(context, resp_obj.obj['volume'])

    @wsgi.extends
    def detail(self, req, resp_obj):
        """Decorate every volume in a detail listing."""
        context = req.environ['cinder.context']
        if not authorize(context):
            return
        resp_obj.attach(xml=VolumeListTenantAttributeTemplate())
        # Iterate a snapshot so decorating entries cannot disturb the
        # iteration itself.
        for vol in list(resp_obj.obj['volumes']):
            self._add_volume_tenant_attribute(context, vol)


class Volume_tenant_attribute(extensions.ExtensionDescriptor):
    """Expose the internal project_id as an attribute of a volume."""

    name = "VolumeTenantAttribute"
    alias = "os-vol-tenant-attr"
    namespace = ("http://docs.openstack.org/volume/ext/"
                 "volume_tenant_attribute/api/v1")
    updated = "2011-11-03T00:00:00+00:00"

    def get_controller_extensions(self):
        """Hook the controller above onto the core 'volumes' resource."""
        extension = extensions.ControllerExtension(
            self, 'volumes', VolumeTenantAttributeController())
        return [extension]


def make_volume(elem):
    """Declare the namespaced tenant_id attribute on a volume element."""
    ns = Volume_tenant_attribute.namespace
    alias = Volume_tenant_attribute.alias
    elem.set('{%s}tenant_id' % ns, '%s:tenant_id' % alias)


class VolumeTenantAttributeTemplate(xmlutil.TemplateBuilder):
    """Slave XML template for a single volume."""

    def construct(self):
        root = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(root)
        nsmap = {Volume_tenant_attribute.alias:
                 Volume_tenant_attribute.namespace}
        return xmlutil.SlaveTemplate(root, 1, nsmap=nsmap)


class VolumeListTenantAttributeTemplate(xmlutil.TemplateBuilder):
    """Slave XML template for a volume listing."""

    def construct(self):
        root = xmlutil.TemplateElement('volumes')
        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
        make_volume(elem)
        nsmap = {Volume_tenant_attribute.alias:
                 Volume_tenant_attribute.namespace}
        return xmlutil.SlaveTemplate(root, 1, nsmap=nsmap)
"""Extension framework for the cinder volume API.

Defines the ExtensionDescriptor contract, the ExtensionManager that
loads and registers extensions, and policy-authorizer helpers.
"""

import os

import webob.dec
import webob.exc

import cinder.api.openstack
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder import flags
from cinder.openstack.common import exception as common_exception
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
import cinder.policy


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS


class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.

    """

    # The name of the extension, e.g., 'Fox In Socks'
    name = None

    # The alias for the extension, e.g., 'FOXNSOX'
    alias = None

    # Description comes from the docstring for the class

    # The XML namespace for the extension, e.g.,
    # 'http://www.fox.in.socks/api/ext/pie/v1.0'
    namespace = None

    # The timestamp when the extension was last updated, e.g.,
    # '2011-01-22T13:25:27-06:00'
    updated = None

    def __init__(self, ext_mgr):
        """Register extension with the extension manager."""

        ext_mgr.register(self)

    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.

        Resources define new nouns, and are accessible through URLs.

        """
        resources = []
        return resources

    def get_controller_extensions(self):
        """List of extensions.ControllerExtension extension objects.

        Controller extensions are used to extend existing controllers.
        """
        controller_exts = []
        return controller_exts

    @classmethod
    def nsmap(cls):
        """Synthesize a namespace map from extension."""

        # Start with a base nsmap
        nsmap = ext_nsmap.copy()

        # Add the namespace for the extension
        nsmap[cls.alias] = cls.namespace

        return nsmap

    @classmethod
    def xmlname(cls, name):
        """Synthesize element and attribute names."""

        return '{%s}%s' % (cls.namespace, name)


def make_ext(elem):
    # Declare the serialized attributes/children of one <extension>.
    elem.set('name')
    elem.set('namespace')
    elem.set('alias')
    elem.set('updated')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    xmlutil.make_links(elem, 'links')


ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}


class ExtensionTemplate(xmlutil.TemplateBuilder):
    # XML template for a single extension.
    def construct(self):
        root = xmlutil.TemplateElement('extension', selector='extension')
        make_ext(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)


class ExtensionsTemplate(xmlutil.TemplateBuilder):
    # XML template for the extension listing.
    def construct(self):
        root = xmlutil.TemplateElement('extensions')
        elem = xmlutil.SubTemplateElement(root, 'extension',
                                          selector='extensions')
        make_ext(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)


class ExtensionsResource(wsgi.Resource):
    """Read-only REST resource listing the loaded extensions."""

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager
        super(ExtensionsResource, self).__init__(None)

    def _translate(self, ext):
        # Flatten a descriptor into the dict form the API serializes.
        ext_data = {}
        ext_data['name'] = ext.name
        ext_data['alias'] = ext.alias
        ext_data['description'] = ext.__doc__
        ext_data['namespace'] = ext.namespace
        ext_data['updated'] = ext.updated
        ext_data['links'] = []  # TODO(dprince): implement extension links
        return ext_data

    @wsgi.serializers(xml=ExtensionsTemplate)
    def index(self, req):
        """Return all loaded extensions."""
        extensions = []
        for _alias, ext in self.extension_manager.extensions.iteritems():
            extensions.append(self._translate(ext))
        return dict(extensions=extensions)

    @wsgi.serializers(xml=ExtensionTemplate)
    def show(self, req, id):
        """Return one extension looked up by alias; 404 if unknown."""
        try:
            # NOTE(dprince): the extensions alias is used as the 'id' for show
            ext = self.extension_manager.extensions[id]
        except KeyError:
            raise webob.exc.HTTPNotFound()

        return dict(extension=self._translate(ext))

    def delete(self, req, id):
        # Extensions are static; modification via the API always 404s.
        raise webob.exc.HTTPNotFound()

    def create(self, req):
        # Extensions are static; modification via the API always 404s.
        raise webob.exc.HTTPNotFound()


class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See cinder/tests/api/extensions/foxinsocks/extension.py for an
    example extension implementation.

    """

    def __init__(self):
        LOG.audit(_('Initializing extension manager.'))

        self.cls_list = FLAGS.osapi_volume_extension
        self.extensions = {}
        self._load_extensions()

    def is_loaded(self, alias):
        """Return True if an extension with this alias has registered."""
        return alias in self.extensions

    def register(self, ext):
        """Record a validated extension; raise on duplicate alias."""
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return

        alias = ext.alias
        LOG.audit(_('Loaded extension: %s'), alias)

        if alias in self.extensions:
            raise exception.Error("Found duplicate extension: %s" % alias)
        self.extensions[alias] = ext

    def get_resources(self):
        """Returns a list of ResourceExtension objects."""

        resources = []
        # The extension listing itself is exposed as a resource.
        resources.append(ResourceExtension('extensions',
                                           ExtensionsResource(self)))

        for ext in self.extensions.values():
            try:
                resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have resource
                # extensions
                pass
        return resources

    def get_controller_extensions(self):
        """Returns a list of ControllerExtension objects."""
        controller_exts = []
        for ext in self.extensions.values():
            try:
                get_ext_method = ext.get_controller_extensions
            except AttributeError:
                # NOTE(Vek): Extensions aren't required to have
                #            controller extensions
                continue
            controller_exts.extend(get_ext_method())
        return controller_exts

    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            LOG.debug(_('Ext name: %s'), extension.name)
            LOG.debug(_('Ext alias: %s'), extension.alias)
            LOG.debug(_('Ext description: %s'),
                      ' '.join(extension.__doc__.strip().split()))
            LOG.debug(_('Ext namespace: %s'), extension.namespace)
            LOG.debug(_('Ext updated: %s'), extension.updated)
        except AttributeError as ex:
            LOG.exception(_("Exception loading extension: %s"), unicode(ex))
            return False

        return True

    def load_extension(self, ext_factory):
        """Execute an extension factory.

        Loads an extension.  The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager.  The factory callable is
        expected to call the register() method at least once.
        """

        LOG.debug(_("Loading extension %s"), ext_factory)

        # Load the factory
        factory = importutils.import_class(ext_factory)

        # Call it
        LOG.debug(_("Calling extension factory %s"), ext_factory)
        factory(self)

    def _load_extensions(self):
        """Load extensions specified on the command line."""

        extensions = list(self.cls_list)

        # NOTE(thingee): Backwards compat for the old extension loader path.
        # We can drop this post-grizzly in the H release.
        old_contrib_path = ('cinder.api.openstack.volume.contrib.'
                            'standard_extensions')
        new_contrib_path = 'cinder.api.contrib.standard_extensions'
        if old_contrib_path in extensions:
            LOG.warn(_('osapi_volume_extension is set to deprecated path: %s'),
                     old_contrib_path)
            LOG.warn(_('Please set your flag or cinder.conf settings for '
                       'osapi_volume_extension to: %s'), new_contrib_path)
            extensions = [e.replace(old_contrib_path, new_contrib_path)
                          for e in extensions]

        for ext_factory in extensions:
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                # A failing extension is logged and skipped, not fatal.
                LOG.warn(_('Failed to load extension %(ext_factory)s: '
                           '%(exc)s') % locals())


class ControllerExtension(object):
    """Extend core controllers of cinder OpenStack API.

    Provide a way to extend existing cinder OpenStack API core
    controllers.
    """

    def __init__(self, extension, collection, controller):
        self.extension = extension
        self.collection = collection
        self.controller = controller


class ResourceExtension(object):
    """Add top level resources to the OpenStack API in cinder."""

    def __init__(self, collection, controller, parent=None,
                 collection_actions=None, member_actions=None,
                 custom_routes_fn=None):
        if not collection_actions:
            collection_actions = {}
        if not member_actions:
            member_actions = {}
        self.collection = collection
        self.controller = controller
        self.parent = parent
        self.collection_actions = collection_actions
        self.member_actions = member_actions
        self.custom_routes_fn = custom_routes_fn


def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions."""

    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))

        # Now, consider each file in turn, only considering .py files
        for fname in filenames:
            root, ext = os.path.splitext(fname)

            # Skip __init__ and anything that's not .py
            if ext != '.py' or root == '__init__':
                continue

            # Try loading it
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" %
                         (package, relpkg, root, classname))

            if ext_list is not None and classname not in ext_list:
                logger.debug("Skipping extension: %s" % classpath)
                continue

            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                logger.warn(_('Failed to load extension %(classpath)s: '
                              '%(exc)s') % locals())

        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue

            # If it has extension(), delegate...
            ext_name = ("%s%s.%s.extension" %
                        (package, relpkg, dname))
            try:
                ext = importutils.import_class(ext_name)
            except common_exception.NotFound:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    ext(ext_mgr)
                except Exception as exc:
                    logger.warn(_('Failed to load extension %(ext_name)s: '
                                  '%(exc)s') % locals())

        # Update the list of directories we'll explore...
        dirnames[:] = subdirs


def extension_authorizer(api_name, extension_name):
    """Build an authorizer enforcing '<api_name>_extension:<ext_name>'."""
    def authorize(context, target=None):
        if target is None:
            target = {'project_id': context.project_id,
                      'user_id': context.user_id}
        action = '%s_extension:%s' % (api_name, extension_name)
        cinder.policy.enforce(context, action, target)
    return authorize


def soft_extension_authorizer(api_name, extension_name):
    """Like extension_authorizer, but returns True/False, never raises."""
    hard_authorize = extension_authorizer(api_name, extension_name)

    def authorize(context):
        try:
            hard_authorize(context)
            return True
        except exception.NotAuthorized:
            return False
    return authorize
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.

"""
import os

from oslo.config import cfg
import webob.dec
import webob.exc

from cinder.api.openstack import wsgi
from cinder import context
from cinder import flags
from cinder.openstack.common import log as logging
from cinder import wsgi as base_wsgi

use_forwarded_for_opt = cfg.BoolOpt(
    'use_forwarded_for',
    default=False,
    help='Treat X-Forwarded-For as the canonical remote address. '
         'Only enable this if you have a sanitizing proxy.')

FLAGS = flags.FLAGS
FLAGS.register_opt(use_forwarded_for_opt)
LOG = logging.getLogger(__name__)


def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy.

    Selects the pipeline named after FLAGS.auth_strategy (optionally the
    '<strategy>_nolimit' variant when rate limiting is disabled), then wraps
    the terminal app in each preceding filter, innermost last.
    """
    pipeline = local_conf[FLAGS.auth_strategy]
    if not FLAGS.api_rate_limit:
        limit_name = FLAGS.auth_strategy + '_nolimit'
        pipeline = local_conf.get(limit_name, pipeline)
    pipeline = pipeline.split()
    filters = [loader.get_filter(n) for n in pipeline[:-1]]
    app = loader.get_app(pipeline[-1])
    filters.reverse()
    # FIX(review): loop variable previously shadowed the builtin ``filter``.
    for filter_factory in filters:
        app = filter_factory(app)
    return app


class InjectContext(base_wsgi.Middleware):
    """Add a 'cinder.context' to WSGI environ."""

    def __init__(self, context, *args, **kwargs):
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, req):
        req.environ['cinder.context'] = self.context
        return self.application


class CinderKeystoneContext(base_wsgi.Middleware):
    """Make a request context from keystone headers"""

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, req):
        # X_USER is the deprecated header; X_USER_ID wins when both present.
        user_id = req.headers.get('X_USER')
        user_id = req.headers.get('X_USER_ID', user_id)
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()
        # get the roles
        # FIX(review): ''.split(',') yields [''], so a missing/empty X_ROLE
        # header previously injected a bogus empty-string role; filter those.
        roles = [r.strip()
                 for r in req.headers.get('X_ROLE', '').split(',')
                 if r.strip()]
        if 'X_TENANT_ID' in req.headers:
            # This is the new header since Keystone went to ID/Name
            project_id = req.headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility
            project_id = req.headers['X_TENANT']

        # Get the auth token
        auth_token = req.headers.get('X_AUTH_TOKEN',
                                     req.headers.get('X_STORAGE_TOKEN'))

        # Build a context, including the auth_token...
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address)

        req.environ['cinder.context'] = ctx
        return self.application


class NoAuthMiddleware(base_wsgi.Middleware):
    """Return a fake token if one isn't specified."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'X-Auth-Token' not in req.headers:
            user_id = req.headers.get('X-Auth-User', 'admin')
            project_id = req.headers.get('X-Auth-Project-Id', 'admin')
            os_url = os.path.join(req.url, project_id)
            res = webob.Response()
            # NOTE(vish): This is expecting and returning Auth(1.1), whereas
            #             keystone uses 2.0 auth.  We should probably allow
            #             2.0 auth here as well.
            res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
            res.headers['X-Server-Management-Url'] = os_url
            res.content_type = 'text/plain'
            res.status = '204'
            return res

        token = req.headers['X-Auth-Token']
        # Token format is "user[:project]"; fall back to user as project.
        user_id, _sep, project_id = token.partition(':')
        project_id = project_id or user_id
        remote_address = getattr(req, 'remote_address', '127.0.0.1')
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)

        req.environ['cinder.context'] = ctx
        return self.application

# === patch: new file cinder/api/middleware/fault.py ===
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+ +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder.openstack.common import log as logging +from cinder import utils +from cinder import wsgi as base_wsgi + + +LOG = logging.getLogger(__name__) + + +class FaultWrapper(base_wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" + + _status_to_type = {} + + @staticmethod + def status_to_type(status): + if not FaultWrapper._status_to_type: + for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): + FaultWrapper._status_to_type[clazz.code] = clazz + return FaultWrapper._status_to_type.get( + status, webob.exc.HTTPInternalServerError)() + + def _error(self, inner, req): + LOG.exception(_("Caught error: %s"), unicode(inner)) + + safe = getattr(inner, 'safe', False) + headers = getattr(inner, 'headers', None) + status = getattr(inner, 'code', 500) + if status is None: + status = 500 + + msg_dict = dict(url=req.url, status=status) + LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) + outer = self.status_to_type(status) + if headers: + outer.headers = headers + # NOTE(johannes): We leave the explanation empty here on + # purpose. It could possibly have sensitive information + # that should not be returned back to the user. 
See + # bugs 868360 and 874472 + # NOTE(eglynn): However, it would be over-conservative and + # inconsistent with the EC2 API to hide every exception, + # including those that are safe to expose, see bug 1021373 + if safe: + outer.explanation = '%s: %s' % (inner.__class__.__name__, + unicode(inner)) + return wsgi.Fault(outer) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + try: + return req.get_response(self.application) + except Exception as ex: + return self._error(ex, req) diff --git a/cinder/api/middleware/sizelimit.py b/cinder/api/middleware/sizelimit.py new file mode 100644 index 0000000000..868db0f39e --- /dev/null +++ b/cinder/api/middleware/sizelimit.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Request Body limiting middleware. 
+ +""" + +from oslo.config import cfg +import webob.dec +import webob.exc + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import wsgi + +#default request size is 112k +max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', + default=114688, + help='Max size for body of a request') + +FLAGS = flags.FLAGS +FLAGS.register_opt(max_request_body_size_opt) +LOG = logging.getLogger(__name__) + + +class LimitingReader(object): + """Reader to limit the size of an incoming request.""" + def __init__(self, data, limit): + """ + :param data: Underlying data object + :param limit: maximum number of bytes the reader should allow + """ + self.data = data + self.limit = limit + self.bytes_read = 0 + + def __iter__(self): + for chunk in self.data: + self.bytes_read += len(chunk) + if self.bytes_read > self.limit: + msg = _("Request is too large.") + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) + else: + yield chunk + + def read(self, i=None): + result = self.data.read(i) + self.bytes_read += len(result) + if self.bytes_read > self.limit: + msg = _("Request is too large.") + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) + return result + + +class RequestBodySizeLimiter(wsgi.Middleware): + """Add a 'cinder.context' to WSGI environ.""" + + def __init__(self, *args, **kwargs): + super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if req.content_length > FLAGS.osapi_max_request_body_size: + msg = _("Request is too large.") + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) + if req.content_length is None and req.is_body_readable: + limiter = LimitingReader(req.body_file, + FLAGS.osapi_max_request_body_size) + req.body_file = limiter + return self.application diff --git a/cinder/api/openstack/__init__.py b/cinder/api/openstack/__init__.py new file mode 100644 index 0000000000..a3b2e6da2e --- /dev/null +++ 
# b/cinder/api/openstack/__init__.py

# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
WSGI middleware for OpenStack API controllers.
"""

import routes

from cinder.api.middleware import fault
from cinder.api.openstack import wsgi
from cinder.openstack.common import log as logging
from cinder import utils
from cinder import wsgi as base_wsgi


LOG = logging.getLogger(__name__)


class APIMapper(routes.Mapper):
    """routes.Mapper that can also match the empty URL."""

    def routematch(self, url=None, environ=None):
        # FIX(review): the original compared with ``url is ""`` -- an
        # identity check against a string literal that only works through
        # CPython interning.  Use equality.
        if url == "":
            result = self._match("", environ)
            return result[0], result[1]
        return routes.Mapper.routematch(self, url, environ)


class ProjectMapper(APIMapper):
    """APIMapper that prefixes every resource path with {project_id}."""

    def resource(self, member_name, collection_name, **kwargs):
        if 'parent_resource' not in kwargs:
            kwargs['path_prefix'] = '{project_id}/'
        else:
            # Nest the path under the parent collection and member id.
            parent_resource = kwargs['parent_resource']
            p_collection = parent_resource['collection_name']
            p_member = parent_resource['member_name']
            kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
                                                                p_member)
        routes.Mapper.resource(self,
                               member_name,
                               collection_name,
                               **kwargs)


class APIRouter(base_wsgi.Router):
    """
    Routes requests on the OpenStack API to the appropriate controller
    and method.
    """
    ExtensionManager = None  # override in subclasses

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have"""
        return cls()

    def __init__(self, ext_mgr=None):
        if ext_mgr is None:
            if self.ExtensionManager:
                ext_mgr = self.ExtensionManager()
            else:
                raise Exception(_("Must specify an ExtensionManager class"))

        mapper = ProjectMapper()
        self.resources = {}
        # Core routes first, then extension resources/actions layered on top.
        self._setup_routes(mapper, ext_mgr)
        self._setup_ext_routes(mapper, ext_mgr)
        self._setup_extensions(ext_mgr)
        super(APIRouter, self).__init__(mapper)

    def _setup_ext_routes(self, mapper, ext_mgr):
        """Register a route for every extension-provided resource."""
        for resource in ext_mgr.get_resources():
            LOG.debug(_('Extended resource: %s'),
                      resource.collection)

            wsgi_resource = wsgi.Resource(resource.controller)
            self.resources[resource.collection] = wsgi_resource
            kargs = dict(
                controller=wsgi_resource,
                collection=resource.collection_actions,
                member=resource.member_actions)

            if resource.parent:
                kargs['parent_resource'] = resource.parent

            mapper.resource(resource.collection, resource.collection, **kargs)

            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)

    def _setup_extensions(self, ext_mgr):
        """Attach controller extensions to already-registered resources."""
        for extension in ext_mgr.get_controller_extensions():
            ext_name = extension.extension.name
            collection = extension.collection
            controller = extension.controller

            if collection not in self.resources:
                LOG.warning(_('Extension %(ext_name)s: Cannot extend '
                              'resource %(collection)s: No such resource') %
                            locals())
                continue

            LOG.debug(_('Extension %(ext_name)s extending resource: '
                        '%(collection)s') % locals())

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)

    def _setup_routes(self, mapper, ext_mgr):
        raise NotImplementedError


class FaultWrapper(fault.FaultWrapper):
    """Deprecated import location kept so old paste configs keep working."""

    def __init__(self, application):
        LOG.warn(_('cinder.api.openstack:FaultWrapper is deprecated. Please '
                   'use cinder.api.middleware.fault:FaultWrapper instead.'))
        super(FaultWrapper, self).__init__(application)

# === patch: new file cinder/api/openstack/urlmap.py ===
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.api import urlmap
from cinder.openstack.common import log as logging


LOG = logging.getLogger(__name__)


def urlmap_factory(loader, global_conf, **local_conf):
    """Deprecated alias for cinder.api.urlmap:urlmap_factory."""
    LOG.warn(_('cinder.api.openstack.urlmap:urlmap_factory is deprecated. '
               'Please use cinder.api.urlmap:urlmap_factory instead.'))
    # FIX(review): the original dropped the app built by the real factory;
    # a paste.deploy factory must return the WSGI app, not None.
    return urlmap.urlmap_factory(loader, global_conf, **local_conf)

# === patch: new file cinder/api/openstack/volume/__init__.py ===
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.api.v1.router import APIRouter as v1_router
from cinder.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class APIRouter(v1_router):
    """Deprecated alias kept for old paste configs; use cinder.api.v1."""

    def __init__(self, ext_mgr=None):
        LOG.warn(_('cinder.api.openstack.volume:APIRouter is deprecated. '
                   'Please use cinder.api.v1.router:APIRouter instead.'))
        super(APIRouter, self).__init__(ext_mgr)

# === patch: new file cinder/api/openstack/volume/versions.py ===
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.api import versions
from cinder.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class Versions(versions.Versions):
    """Deprecated alias kept for old paste configs; use cinder.api.versions."""

    def __init__(self):
        LOG.warn(_('cinder.api.openstack.volume.versions.Versions is '
                   'deprecated. Please use cinder.api.versions.Versions '
                   'instead.'))
        super(Versions, self).__init__()

# === patch: new file cinder/api/openstack/wsgi.py ===
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect
import math
import time
import webob

from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder import wsgi

from lxml import etree
from xml.dom import minidom
from xml.parsers import expat


XMLNS_V1 = 'http://docs.openstack.org/volume/api/v1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'

LOG = logging.getLogger(__name__)

# The vendor content types should serialize identically to the non-vendor
So to avoid littering the code with both options, we +# map the vendor to the other when looking up the type +_CONTENT_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'application/json', + 'application/vnd.openstack.volume+xml': 'application/xml', +} + +SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.volume+json', + 'application/xml', + 'application/vnd.openstack.volume+xml', +) + +_MEDIA_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'json', + 'application/json': 'json', + 'application/vnd.openstack.volume+xml': 'xml', + 'application/xml': 'xml', + 'application/atom+xml': 'atom', +} + + +class Request(webob.Request): + """Add some OpenStack API-specific logic to the base webob.Request.""" + + def best_match_content_type(self): + """Determine the requested response content-type.""" + if 'cinder.best_content_type' not in self.environ: + # Calculate the best MIME type + content_type = None + + # Check URL path suffix + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in SUPPORTED_CONTENT_TYPES: + content_type = possible_type + + if not content_type: + content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) + + self.environ['cinder.best_content_type'] = (content_type or + 'application/json') + + return self.environ['cinder.best_content_type'] + + def get_content_type(self): + """Determine content type of the request body. 
+ + Does not do any body introspection, only checks header + + """ + if "Content-Type" not in self.headers: + return None + + allowed_types = SUPPORTED_CONTENT_TYPES + content_type = self.content_type + + if content_type not in allowed_types: + raise exception.InvalidContentType(content_type=content_type) + + return content_type + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return jsonutils.loads(datastring) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class XMLDeserializer(TextDeserializer): + + def __init__(self, metadata=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def _from_xml(self, datastring): + plurals = set(self.metadata.get('plurals', {})) + + try: + node = utils.safe_minidom_parse_string(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. 
+ + :param listnames: list of XML node names whose subnodes should + be considered list items. + + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + def find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name""" + for node in parent.childNodes: + if node.nodeName == name: + return node + return None + + def find_children_named(self, parent, name): + """Return all of a nodes children who have the given name""" + for node in parent.childNodes: + if node.nodeName == name: + yield node + + def extract_text(self, node): + """Get the text field contained by the given node""" + if len(node.childNodes) == 1: + child = node.childNodes[0] + if child.nodeType == child.TEXT_NODE: + return child.nodeValue + return "" + + def find_attribute_or_element(self, parent, name): + """Get an attribute value; fallback to an element if not found""" + if parent.hasAttribute(name): + return parent.getAttribute(name) + + node = self.find_first_child_named(parent, name) + if node: + return self.extract_text(node) + + return None + + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + +class MetadataXMLDeserializer(XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + metadata = {} + if metadata_node is not None: + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + +class 
DictSerializer(ActionDispatcher): + """Default request body serialization""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization""" + + def default(self, data): + return jsonutils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def default(self, data): + # We expect data to contain a single key which is the XML root. + root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + return self.to_xml_string(node) + + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml('UTF-8') + + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, has_atom=False): + if self.xmlns is not None: + node.setAttribute('xmlns', self.xmlns) + if has_atom: + node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + #TODO(bcwaldon): accomplish this without a type-check + if isinstance(data, list): + 
collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check + elif isinstance(data, dict): + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + def _create_link_nodes(self, xml_doc, links): + link_nodes = [] + for link in links: + link_node = xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + if 'type' in link: + link_node.setAttribute('type', link['type']) + link_nodes.append(link_node) + return link_nodes + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8', xml_declaration=True) + + +def serializers(**serializers): + """Attaches serializers to a method. + + This decorator associates a dictionary of serializers with a + method. 
Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_serializers'): + func.wsgi_serializers = {} + func.wsgi_serializers.update(serializers) + return func + return decorator + + +def deserializers(**deserializers): + """Attaches deserializers to a method. + + This decorator associates a dictionary of deserializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_deserializers'): + func.wsgi_deserializers = {} + func.wsgi_deserializers.update(deserializers) + return func + return decorator + + +def response(code): + """Attaches response code to a method. + + This decorator associates a response code with a method. Note + that the function attributes are directly manipulated; the method + is not wrapped. + """ + + def decorator(func): + func.wsgi_code = code + return func + return decorator + + +class ResponseObject(object): + """Bundles a response object with appropriate serializers. + + Object that app methods may return in order to bind alternate + serializers with a response object to be serialized. Its use is + optional. + """ + + def __init__(self, obj, code=None, **serializers): + """Binds serializers with an object. + + Takes keyword arguments akin to the @serializer() decorator + for specifying serializers. Serializers specified will be + given preference over default serializers or method-specific + serializers on return. 
+ """ + + self.obj = obj + self.serializers = serializers + self._default_code = 200 + self._code = code + self._headers = {} + self.serializer = None + self.media_type = None + + def __getitem__(self, key): + """Retrieves a header with the given name.""" + + return self._headers[key.lower()] + + def __setitem__(self, key, value): + """Sets a header with the given name to the given value.""" + + self._headers[key.lower()] = value + + def __delitem__(self, key): + """Deletes the header with the given name.""" + + del self._headers[key.lower()] + + def _bind_method_serializers(self, meth_serializers): + """Binds method serializers with the response object. + + Binds the method serializers with the response object. + Serializers specified to the constructor will take precedence + over serializers specified to this method. + + :param meth_serializers: A dictionary with keys mapping to + response types and values containing + serializer objects. + """ + + # We can't use update because that would be the wrong + # precedence + for mtype, serializer in meth_serializers.items(): + self.serializers.setdefault(mtype, serializer) + + def get_serializer(self, content_type, default_serializers=None): + """Returns the serializer for the wrapped object. + + Returns the serializer for the wrapped object subject to the + indicated content type. If no serializer matching the content + type is attached, an appropriate serializer drawn from the + default serializers will be used. If no appropriate + serializer is available, raises InvalidContentType. 
+ """ + + default_serializers = default_serializers or {} + + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in self.serializers: + return mtype, self.serializers[mtype] + else: + return mtype, default_serializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def preserialize(self, content_type, default_serializers=None): + """Prepares the serializer that will be used to serialize. + + Determines the serializer that will be used and prepares an + instance of it for later call. This allows the serializer to + be accessed by extensions for, e.g., template extension. + """ + + mtype, serializer = self.get_serializer(content_type, + default_serializers) + self.media_type = mtype + self.serializer = serializer() + + def attach(self, **kwargs): + """Attach slave templates to serializers.""" + + if self.media_type in kwargs: + self.serializer.attach(kwargs[self.media_type]) + + def serialize(self, request, content_type, default_serializers=None): + """Serializes the wrapped object. + + Utility method for serializing the wrapped object. Returns a + webob.Response object. 
+ """ + + if self.serializer: + serializer = self.serializer + else: + _mtype, _serializer = self.get_serializer(content_type, + default_serializers) + serializer = _serializer() + + response = webob.Response() + response.status_int = self.code + for hdr, value in self._headers.items(): + response.headers[hdr] = value + response.headers['Content-Type'] = content_type + if self.obj is not None: + response.body = serializer.serialize(self.obj) + + return response + + @property + def code(self): + """Retrieve the response status.""" + + return self._code or self._default_code + + @property + def headers(self): + """Retrieve the headers.""" + + return self._headers.copy() + + +def action_peek_json(body): + """Determine action to invoke.""" + + try: + decoded = jsonutils.loads(body) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + # Make sure there's exactly one key... + if len(decoded) != 1: + msg = _("too many body keys") + raise exception.MalformedRequestBody(reason=msg) + + # Return the action and the decoded body... + return decoded.keys()[0] + + +def action_peek_xml(body): + """Determine action to invoke.""" + + dom = utils.safe_minidom_parse_string(body) + action_node = dom.childNodes[0] + + return action_node.tagName + + +class ResourceExceptionHandler(object): + """Context manager to handle Resource exceptions. + + Used when processing exceptions generated by API implementation + methods (or their extensions). Converts most exceptions to Fault + exceptions, with the appropriate logging. 
+ """ + + def __enter__(self): + return None + + def __exit__(self, ex_type, ex_value, ex_traceback): + if not ex_value: + return True + + if isinstance(ex_value, exception.NotAuthorized): + msg = unicode(ex_value) + raise Fault(webob.exc.HTTPForbidden(explanation=msg)) + elif isinstance(ex_value, exception.Invalid): + raise Fault(exception.ConvertedException( + code=ex_value.code, explanation=unicode(ex_value))) + elif isinstance(ex_value, TypeError): + exc_info = (ex_type, ex_value, ex_traceback) + LOG.error(_( + 'Exception handling resource: %s') % + ex_value, exc_info=exc_info) + raise Fault(webob.exc.HTTPBadRequest()) + elif isinstance(ex_value, Fault): + LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + raise ex_value + elif isinstance(ex_value, webob.exc.HTTPException): + LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + raise Fault(ex_value) + + # We didn't handle the exception + return False + + +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). + They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + Exceptions derived from webob.exc.HTTPException will be automatically + wrapped in Fault() to provide API friendly error responses. 
+ + """ + + def __init__(self, controller, action_peek=None, **deserializers): + """ + :param controller: object that implement methods created by routes lib + :param action_peek: dictionary of routines for peeking into an action + request body to determine the desired action + """ + + self.controller = controller + + default_deserializers = dict(xml=XMLDeserializer, + json=JSONDeserializer) + default_deserializers.update(deserializers) + + self.default_deserializers = default_deserializers + self.default_serializers = dict(xml=XMLDictSerializer, + json=JSONDictSerializer) + + self.action_peek = dict(xml=action_peek_xml, + json=action_peek_json) + self.action_peek.update(action_peek or {}) + + # Copy over the actions dictionary + self.wsgi_actions = {} + if controller: + self.register_actions(controller) + + # Save a mapping of extensions + self.wsgi_extensions = {} + self.wsgi_action_extensions = {} + + def register_actions(self, controller): + """Registers controller actions with this resource.""" + + actions = getattr(controller, 'wsgi_actions', {}) + for key, method_name in actions.items(): + self.wsgi_actions[key] = getattr(controller, method_name) + + def register_extensions(self, controller): + """Registers controller extensions with this resource.""" + + extensions = getattr(controller, 'wsgi_extensions', []) + for method_name, action_name in extensions: + # Look up the extending method + extension = getattr(controller, method_name) + + if action_name: + # Extending an action... 
+ if action_name not in self.wsgi_action_extensions: + self.wsgi_action_extensions[action_name] = [] + self.wsgi_action_extensions[action_name].append(extension) + else: + # Extending a regular method + if method_name not in self.wsgi_extensions: + self.wsgi_extensions[method_name] = [] + self.wsgi_extensions[method_name].append(extension) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + + # NOTE(Vek): Check for get_action_args() override in the + # controller + if hasattr(self.controller, 'get_action_args'): + return self.controller.get_action_args(request_environment) + + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except (KeyError, IndexError, AttributeError): + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + def get_body(self, request): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return None, '' + + if not content_type: + LOG.debug(_("No Content-Type provided in request")) + return None, '' + + if len(request.body) <= 0: + LOG.debug(_("Empty body provided in request")) + return None, '' + + return content_type, request.body + + def deserialize(self, meth, content_type, body): + meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in meth_deserializers: + deserializer = meth_deserializers[mtype] + else: + deserializer = self.default_deserializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + return deserializer().deserialize(body) + + def pre_process_extensions(self, extensions, request, action_args): + # List of callables for post-processing extensions + post = [] + + for ext in extensions: + if inspect.isgeneratorfunction(ext): + 
response = None + + # If it's a generator function, the part before the + # yield is the preprocessing stage + try: + with ResourceExceptionHandler(): + gen = ext(req=request, **action_args) + response = gen.next() + except Fault as ex: + response = ex + + # We had a response... + if response: + return response, [] + + # No response, queue up generator for post-processing + post.append(gen) + else: + # Regular functions only perform post-processing + post.append(ext) + + # Run post-processing in the reverse order + return None, reversed(post) + + def post_process_extensions(self, extensions, resp_obj, request, + action_args): + for ext in extensions: + response = None + if inspect.isgenerator(ext): + # If it's a generator, run the second half of + # processing + try: + with ResourceExceptionHandler(): + response = ext.send(resp_obj) + except StopIteration: + # Normal exit of generator + continue + except Fault as ex: + response = ex + else: + # Regular functions get post-processing... + try: + with ResourceExceptionHandler(): + response = ext(req=request, resp_obj=resp_obj, + **action_args) + except Fault as ex: + response = ex + + # We had a response... + if response: + return response + + return None + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + LOG.info("%(method)s %(url)s" % {"method": request.method, + "url": request.url}) + + # Identify the action, its arguments, and the requested + # content type + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + content_type, body = self.get_body(request) + accept = request.best_match_content_type() + + # NOTE(Vek): Splitting the function up this way allows for + # auditing by external tools that wrap the existing + # function. If we try to audit __call__(), we can + # run into troubles due to the @webob.dec.wsgify() + # decorator. 
+ return self._process_stack(request, action, action_args, + content_type, body, accept) + + def _process_stack(self, request, action, action_args, + content_type, body, accept): + """Implement the processing stack.""" + + # Get the implementing method + try: + meth, extensions = self.get_method(request, action, + content_type, body) + except (AttributeError, TypeError): + return Fault(webob.exc.HTTPNotFound()) + except KeyError as ex: + msg = _("There is no such action: %s") % ex.args[0] + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Now, deserialize the request body... + try: + if content_type: + contents = self.deserialize(meth, content_type, body) + else: + contents = {} + except exception.InvalidContentType: + msg = _("Unsupported Content-Type") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Update the action args + action_args.update(contents) + + project_id = action_args.pop("project_id", None) + context = request.environ.get('cinder.context') + if (context and project_id and (project_id != context.project_id)): + msg = _("Malformed request url") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Run pre-processing extensions + response, post = self.pre_process_extensions(extensions, + request, action_args) + + if not response: + try: + with ResourceExceptionHandler(): + action_result = self.dispatch(meth, request, action_args) + except Fault as ex: + response = ex + + if not response: + # No exceptions; convert action_result into a + # ResponseObject + resp_obj = None + if type(action_result) is dict or action_result is None: + resp_obj = ResponseObject(action_result) + elif isinstance(action_result, ResponseObject): + resp_obj = action_result + 
else: + response = action_result + + # Run post-processing extensions + if resp_obj: + _set_request_id_header(request, resp_obj) + # Do a preserialize to set up the response object + serializers = getattr(meth, 'wsgi_serializers', {}) + resp_obj._bind_method_serializers(serializers) + if hasattr(meth, 'wsgi_code'): + resp_obj._default_code = meth.wsgi_code + resp_obj.preserialize(accept, self.default_serializers) + + # Process post-processing extensions + response = self.post_process_extensions(post, resp_obj, + request, action_args) + + if resp_obj and not response: + response = resp_obj.serialize(request, accept, + self.default_serializers) + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError, e: + msg_dict = dict(url=request.url, e=e) + msg = _("%(url)s returned a fault: %(e)s") % msg_dict + + LOG.info(msg) + + return response + + def get_method(self, request, action, content_type, body): + """Look up the action-specific method and its extensions.""" + + # Look up the method + try: + if not self.controller: + meth = getattr(self, action) + else: + meth = getattr(self.controller, action) + except AttributeError: + if (not self.wsgi_actions or + action not in ['action', 'create', 'delete']): + # Propagate the error + raise + else: + return meth, self.wsgi_extensions.get(action, []) + + if action == 'action': + # OK, it's an action; figure out which action... + mtype = _MEDIA_TYPE_MAP.get(content_type) + action_name = self.action_peek[mtype](body) + LOG.debug("Action body: %s" % body) + else: + action_name = action + + # Look up the action method + return (self.wsgi_actions[action_name], + self.wsgi_action_extensions.get(action_name, [])) + + def dispatch(self, method, request, action_args): + """Dispatch a call to the action-specific method.""" + + return method(req=request, **action_args) + + +def action(name): + """Mark a function as an action. 
+ + The given name will be taken as the action key in the body. + + This is also overloaded to allow extensions to provide + non-extending definitions of create and delete operations. + """ + + def decorator(func): + func.wsgi_action = name + return func + return decorator + + +def extends(*args, **kwargs): + """Indicate a function extends an operation. + + Can be used as either:: + + @extends + def index(...): + pass + + or as:: + + @extends(action='resize') + def _action_resize(...): + pass + """ + + def decorator(func): + # Store enough information to find what we're extending + func.wsgi_extends = (func.__name__, kwargs.get('action')) + return func + + # If we have positional arguments, call the decorator + if args: + return decorator(*args) + + # OK, return the decorator instead + return decorator + + +class ControllerMetaclass(type): + """Controller metaclass. + + This metaclass automates the task of assembling a dictionary + mapping action keys to method names. + """ + + def __new__(mcs, name, bases, cls_dict): + """Adds the wsgi_actions dictionary to the class.""" + + # Find all actions + actions = {} + extensions = [] + # start with wsgi actions from base classes + for base in bases: + actions.update(getattr(base, 'wsgi_actions', {})) + for key, value in cls_dict.items(): + if not callable(value): + continue + if getattr(value, 'wsgi_action', None): + actions[value.wsgi_action] = key + elif getattr(value, 'wsgi_extends', None): + extensions.append(value.wsgi_extends) + + # Add the actions and extensions to the class dict + cls_dict['wsgi_actions'] = actions + cls_dict['wsgi_extensions'] = extensions + + return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, + cls_dict) + + +class Controller(object): + """Default controller.""" + + __metaclass__ = ControllerMetaclass + + _view_builder_class = None + + def __init__(self, view_builder=None): + """Initialize controller with a view builder instance.""" + if view_builder: + self._view_builder = 
view_builder + elif self._view_builder_class: + self._view_builder = self._view_builder_class() + else: + self._view_builder = None + + @staticmethod + def is_valid_body(body, entity_name): + if not (body and entity_name in body): + return False + + def is_dict(d): + try: + d.get(None) + return True + except AttributeError: + return False + + if not is_dict(body[entity_name]): + return False + + return True + + +class Fault(webob.exc.HTTPException): + """Wrap webob.exc.HTTPException to provide API friendly response.""" + + _fault_names = {400: "badRequest", + 401: "unauthorized", + 403: "forbidden", + 404: "itemNotFound", + 405: "badMethod", + 409: "conflictingRequest", + 413: "overLimit", + 415: "badMediaType", + 501: "notImplemented", + 503: "serviceUnavailable"} + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + self.status_int = exception.status_int + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. 
+ code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "computeFault") + fault_data = { + fault_name: { + 'code': code, + 'message': self.wrapped_exc.explanation}} + if code == 413: + retry = self.wrapped_exc.headers['Retry-After'] + fault_data[fault_name]['retryAfter'] = retry + + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {fault_name: 'code'}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + + content_type = req.best_match_content_type() + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + _set_request_id_header(req, self.wrapped_exc.headers) + + return self.wrapped_exc + + def __str__(self): + return self.wrapped_exc.__str__() + + +def _set_request_id_header(req, headers): + context = req.environ.get('cinder.context') + if context: + headers['x-compute-request-id'] = context.request_id + + +class OverLimitFault(webob.exc.HTTPException): + """ + Rate-limited request response. + """ + + def __init__(self, message, details, retry_time): + """ + Initialize new `OverLimitFault` with relevant information. + """ + hdrs = OverLimitFault._retry_after(retry_time) + self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) + self.content = { + "overLimitFault": { + "code": self.wrapped_exc.status_int, + "message": message, + "details": details, + }, + } + + @staticmethod + def _retry_after(retry_time): + delay = int(math.ceil(retry_time - time.time())) + retry_after = delay if delay > 0 else 0 + headers = {'Retry-After': '%d' % retry_after} + return headers + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """ + Return the wrapped exception with a serialized body conforming to our + error format. 
+ """ + content_type = request.best_match_content_type() + metadata = {"attributes": {"overLimitFault": "code"}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + content = serializer.serialize(self.content) + self.wrapped_exc.body = content + + return self.wrapped_exc diff --git a/cinder/api/schemas/atom-link.rng b/cinder/api/schemas/atom-link.rng new file mode 100644 index 0000000000..edba5eee6c --- /dev/null +++ b/cinder/api/schemas/atom-link.rng @@ -0,0 +1,141 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + [^:]* + + + + + + .+/.+ + + + + + + [A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})* + + + + + + + + + + + + xml:base + xml:lang + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cinder/api/schemas/v1.1/extension.rng b/cinder/api/schemas/v1.1/extension.rng new file mode 100644 index 0000000000..b16d8c1300 --- /dev/null +++ b/cinder/api/schemas/v1.1/extension.rng @@ -0,0 +1,11 @@ + + + + + + + + + + diff --git a/cinder/api/schemas/v1.1/extensions.rng b/cinder/api/schemas/v1.1/extensions.rng new file mode 100644 index 0000000000..8538eaf2da --- /dev/null +++ b/cinder/api/schemas/v1.1/extensions.rng @@ -0,0 +1,6 @@ + + + + + diff --git a/cinder/api/schemas/v1.1/limits.rng b/cinder/api/schemas/v1.1/limits.rng new file mode 100644 index 0000000000..a66af4b9c4 --- /dev/null +++ b/cinder/api/schemas/v1.1/limits.rng @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cinder/api/schemas/v1.1/metadata.rng b/cinder/api/schemas/v1.1/metadata.rng new file mode 100644 index 0000000000..b2f5d702a2 --- /dev/null +++ b/cinder/api/schemas/v1.1/metadata.rng @@ -0,0 +1,9 @@ + + + + + + + + diff --git a/cinder/api/sizelimit.py b/cinder/api/sizelimit.py new file mode 100644 index 
0000000000..70cd45473e --- /dev/null +++ b/cinder/api/sizelimit.py @@ -0,0 +1,28 @@ +# Copyright (c) 2013 OpenStack, LLC. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.middleware import sizelimit +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter): + def __init__(self, *args, **kwargs): + LOG.warn(_('cinder.api.sizelimit:RequestBodySizeLimiter is ' + 'deprecated. Please use cinder.api.middleware.sizelimit:' + 'RequestBodySizeLimiter instead')) + super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) diff --git a/cinder/api/urlmap.py b/cinder/api/urlmap.py new file mode 100644 index 0000000000..18ec2020d0 --- /dev/null +++ b/cinder/api/urlmap.py @@ -0,0 +1,297 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import paste.urlmap +import re +import urllib2 + +from cinder.api.openstack import wsgi +from cinder.openstack.common import log as logging + + +_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' +_option_header_piece_re = re.compile( + r';\s*([^\s;=]+|%s)\s*' + r'(?:=\s*([^;]+|%s))?\s*' % + (_quoted_string_re, _quoted_string_re)) + +LOG = logging.getLogger(__name__) + + +def unquote_header_value(value): + """Unquotes a header value. + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + return value + + +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + :param value: a string with a list header. + :return: :class:`list` + """ + result = [] + for item in urllib2.parse_http_list(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +def parse_options_header(value): + """Parse a ``Content-Type`` like header into a tuple with the content + type and the options: + + >>> parse_options_header('Content-Type: text/html; mimetype=text/html') + ('Content-Type:', {'mimetype': 'text/html'}) + + :param value: the header to parse. 
+ :return: (str, options) + """ + def _tokenize(string): + for match in _option_header_piece_re.finditer(string): + key, value = match.groups() + key = unquote_header_value(key) + if value is not None: + value = unquote_header_value(value) + yield key, value + + if not value: + return '', {} + + parts = _tokenize(';' + value) + name = parts.next()[0] + extra = dict(parts) + return name, extra + + +class Accept(object): + def __init__(self, value): + self._content_types = [parse_options_header(v) for v in + parse_list_header(value)] + + def best_match(self, supported_content_types): + # FIXME: Should we have a more sophisticated matching algorithm that + # takes into account the version as well? + best_quality = -1 + best_content_type = None + best_params = {} + best_match = '*/*' + + for content_type in supported_content_types: + for content_mask, params in self._content_types: + try: + quality = float(params.get('q', 1)) + except ValueError: + continue + + if quality < best_quality: + continue + elif best_quality == quality: + if best_match.count('*') <= content_mask.count('*'): + continue + + if self._match_mask(content_mask, content_type): + best_quality = quality + best_content_type = content_type + best_params = params + best_match = content_mask + + return best_content_type, best_params + + def content_type_params(self, best_content_type): + """Find parameters in Accept header for given content type.""" + for content_type, params in self._content_types: + if best_content_type == content_type: + return params + + return {} + + def _match_mask(self, mask, content_type): + if '*' not in mask: + return content_type == mask + if mask == '*/*': + return True + mask_major = mask[:-2] + content_type_major = content_type.split('/', 1)[0] + return content_type_major == mask_major + + +def urlmap_factory(loader, global_conf, **local_conf): + if 'not_found_app' in local_conf: + not_found_app = local_conf.pop('not_found_app') + else: + not_found_app = 
global_conf.get('not_found_app') + if not_found_app: + not_found_app = loader.get_app(not_found_app, global_conf=global_conf) + urlmap = URLMap(not_found_app=not_found_app) + for path, app_name in local_conf.items(): + path = paste.urlmap.parse_path_expression(path) + app = loader.get_app(app_name, global_conf=global_conf) + urlmap[path] = app + return urlmap + + +class URLMap(paste.urlmap.URLMap): + def _match(self, host, port, path_info): + """Find longest match for a given URL path.""" + for (domain, app_url), app in self.applications: + if domain and domain != host and domain != host + ':' + port: + continue + if (path_info == app_url or path_info.startswith(app_url + '/')): + return app, app_url + + return None, None + + def _set_script_name(self, app, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + return app(environ, start_response) + + return wrap + + def _munge_path(self, app, path_info, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + environ['PATH_INFO'] = path_info[len(app_url):] + return app(environ, start_response) + + return wrap + + def _path_strategy(self, host, port, path_info): + """Check path suffix for MIME type and path prefix for API version.""" + mime_type = app = app_url = None + + parts = path_info.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: + mime_type = possible_type + + parts = path_info.split('/') + if len(parts) > 1: + possible_app, possible_app_url = self._match(host, port, path_info) + # Don't use prefix if it ends up matching default + if possible_app and possible_app_url: + app_url = possible_app_url + app = self._munge_path(possible_app, path_info, app_url) + + return mime_type, app, app_url + + def _content_type_strategy(self, host, port, environ): + """Check Content-Type header for API version.""" + app = None + params = parse_options_header(environ.get('CONTENT_TYPE', 
''))[1] + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return app + + def _accept_strategy(self, host, port, environ, supported_content_types): + """Check Accept header for best matching MIME type and API version.""" + accept = Accept(environ.get('HTTP_ACCEPT', '')) + + app = None + + # Find the best match in the Accept header + mime_type, params = accept.best_match(supported_content_types) + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return mime_type, app + + def __call__(self, environ, start_response): + host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() + if ':' in host: + host, port = host.split(':', 1) + else: + if environ['wsgi.url_scheme'] == 'http': + port = '80' + else: + port = '443' + + path_info = environ['PATH_INFO'] + path_info = self.normalize_url(path_info, False)[1] + + # The MIME type for the response is determined in one of two ways: + # 1) URL path suffix (eg /servers/detail.json) + # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) + + # The API version is determined in one of three ways: + # 1) URL path prefix (eg /v1.1/tenant/servers/detail) + # 2) Content-Type header (eg application/json;version=1.1) + # 3) Accept header (eg application/json;q=0.8;version=1.1) + + supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) + + mime_type, app, app_url = self._path_strategy(host, port, path_info) + + # Accept application/atom+xml for the index query of each API + # version mount point as well as the root index + if (app_url and app_url + '/' == path_info) or path_info == '/': + supported_content_types.append('application/atom+xml') + + if not app: + app = self._content_type_strategy(host, port, environ) + + if not mime_type or not app: + possible_mime_type, possible_app = self._accept_strategy( + 
host, port, environ, supported_content_types) + if possible_mime_type and not mime_type: + mime_type = possible_mime_type + if possible_app and not app: + app = possible_app + + if not mime_type: + mime_type = 'application/json' + + if not app: + # Didn't match a particular version, probably matches default + app, app_url = self._match(host, port, path_info) + if app: + app = self._munge_path(app, path_info, app_url) + + if app: + environ['cinder.best_content_type'] = mime_type + return app(environ, start_response) + + environ['paste.urlmap_object'] = self + return self.not_found_application(environ, start_response) diff --git a/cinder/api/v1/__init__.py b/cinder/api/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/api/v1/limits.py b/cinder/api/v1/limits.py new file mode 100644 index 0000000000..b8a0ad848d --- /dev/null +++ b/cinder/api/v1/limits.py @@ -0,0 +1,482 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Module dedicated functions/classes dealing with rate limiting requests. 
+""" + +import collections +import copy +import httplib +import math +import re +import time + +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder.api.views import limits as limits_views +from cinder.api import xmlutil +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder import quota +from cinder import wsgi as base_wsgi + +QUOTAS = quota.QUOTAS + + +# Convenience constants for the limits dictionary passed to Limiter(). +PER_SECOND = 1 +PER_MINUTE = 60 +PER_HOUR = 60 * 60 +PER_DAY = 60 * 60 * 24 + + +limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class LimitsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('limits', selector='limits') + + rates = xmlutil.SubTemplateElement(root, 'rates') + rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate') + rate.set('uri', 'uri') + rate.set('regex', 'regex') + limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit') + limit.set('value', 'value') + limit.set('verb', 'verb') + limit.set('remaining', 'remaining') + limit.set('unit', 'unit') + limit.set('next-available', 'next-available') + + absolute = xmlutil.SubTemplateElement(root, 'absolute', + selector='absolute') + limit = xmlutil.SubTemplateElement(absolute, 'limit', + selector=xmlutil.get_items) + limit.set('name', 0) + limit.set('value', 1) + + return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap) + + +class LimitsController(object): + """ + Controller for accessing limits in the OpenStack API. + """ + + @wsgi.serializers(xml=LimitsTemplate) + def index(self, req): + """ + Return all global and rate limit information. 
+ """ + context = req.environ['cinder.context'] + quotas = QUOTAS.get_project_quotas(context, context.project_id, + usages=False) + abs_limits = dict((k, v['limit']) for k, v in quotas.items()) + rate_limits = req.environ.get("cinder.limits", []) + + builder = self._get_view_builder(req) + return builder.build(rate_limits, abs_limits) + + def _get_view_builder(self, req): + return limits_views.ViewBuilder() + + +def create_resource(): + return wsgi.Resource(LimitsController()) + + +class Limit(object): + """ + Stores information about a limit for HTTP requests. + """ + + UNITS = { + 1: "SECOND", + 60: "MINUTE", + 60 * 60: "HOUR", + 60 * 60 * 24: "DAY", + } + + UNIT_MAP = dict([(v, k) for k, v in UNITS.items()]) + + def __init__(self, verb, uri, regex, value, unit): + """ + Initialize a new `Limit`. + + @param verb: HTTP verb (POST, PUT, etc.) + @param uri: Human-readable URI + @param regex: Regular expression format for this limit + @param value: Integer number of requests which can be made + @param unit: Unit of measure for the value parameter + """ + self.verb = verb + self.uri = uri + self.regex = regex + self.value = int(value) + self.unit = unit + self.unit_string = self.display_unit().lower() + self.remaining = int(value) + + if value <= 0: + raise ValueError("Limit value must be > 0") + + self.last_request = None + self.next_request = None + + self.water_level = 0 + self.capacity = self.unit + self.request_value = float(self.capacity) / float(self.value) + msg = _("Only %(value)s %(verb)s request(s) can be " + "made to %(uri)s every %(unit_string)s.") + self.error_message = msg % self.__dict__ + + def __call__(self, verb, url): + """ + Represents a call to this limit from a relevant request. + + @param verb: string http verb (POST, GET, etc.) 
+ @param url: string URL + """ + if self.verb != verb or not re.match(self.regex, url): + return + + now = self._get_time() + + if self.last_request is None: + self.last_request = now + + leak_value = now - self.last_request + + self.water_level -= leak_value + self.water_level = max(self.water_level, 0) + self.water_level += self.request_value + + difference = self.water_level - self.capacity + + self.last_request = now + + if difference > 0: + self.water_level -= self.request_value + self.next_request = now + difference + return difference + + cap = self.capacity + water = self.water_level + val = self.value + + self.remaining = math.floor(((cap - water) / cap) * val) + self.next_request = now + + def _get_time(self): + """Retrieve the current time. Broken out for testability.""" + return time.time() + + def display_unit(self): + """Display the string name of the unit.""" + return self.UNITS.get(self.unit, "UNKNOWN") + + def display(self): + """Return a useful representation of this class.""" + return { + "verb": self.verb, + "URI": self.uri, + "regex": self.regex, + "value": self.value, + "remaining": int(self.remaining), + "unit": self.display_unit(), + "resetTime": int(self.next_request or self._get_time()), + } + +# "Limit" format is a dictionary with the HTTP verb, human-readable URI, +# a regular-expression to match, value and unit of measure (PER_DAY, etc.) + +DEFAULT_LIMITS = [ + Limit("POST", "*", ".*", 10, PER_MINUTE), + Limit("POST", "*/servers", "^/servers", 50, PER_DAY), + Limit("PUT", "*", ".*", 10, PER_MINUTE), + Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), + Limit("DELETE", "*", ".*", 100, PER_MINUTE), +] + + +class RateLimitingMiddleware(base_wsgi.Middleware): + """ + Rate-limits requests passing through this middleware. All limit information + is stored in memory for this implementation. 
+ """ + + def __init__(self, application, limits=None, limiter=None, **kwargs): + """ + Initialize new `RateLimitingMiddleware`, which wraps the given WSGI + application and sets up the given limits. + + @param application: WSGI application to wrap + @param limits: String describing limits + @param limiter: String identifying class for representing limits + + Other parameters are passed to the constructor for the limiter. + """ + base_wsgi.Middleware.__init__(self, application) + + # Select the limiter class + if limiter is None: + limiter = Limiter + else: + limiter = importutils.import_class(limiter) + + # Parse the limits, if any are provided + if limits is not None: + limits = limiter.parse_limits(limits) + + self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """ + Represents a single call through this middleware. We should record the + request if we have a limit relevant to it. If no limit is relevant to + the request, ignore it. + + If the request should be rate limited, return a fault telling the user + they are over the limit and need to retry later. + """ + verb = req.method + url = req.url + context = req.environ.get("cinder.context") + + if context: + username = context.user_id + else: + username = None + + delay, error = self._limiter.check_for_delay(verb, url, username) + + if delay: + msg = _("This request was rate-limited.") + retry = time.time() + delay + return wsgi.OverLimitFault(msg, error, retry) + + req.environ["cinder.limits"] = self._limiter.get_limits(username) + + return self.application + + +class Limiter(object): + """ + Rate-limit checking class which handles limits in memory. + """ + + def __init__(self, limits, **kwargs): + """ + Initialize the new `Limiter`. 
+ + @param limits: List of `Limit` objects + """ + self.limits = copy.deepcopy(limits) + self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) + + # Pick up any per-user limit information + for key, value in kwargs.items(): + if key.startswith('user:'): + username = key[5:] + self.levels[username] = self.parse_limits(value) + + def get_limits(self, username=None): + """ + Return the limits for a given user. + """ + return [limit.display() for limit in self.levels[username]] + + def check_for_delay(self, verb, url, username=None): + """ + Check the given verb/user/user triplet for limit. + + @return: Tuple of delay (in seconds) and error message (or None, None) + """ + delays = [] + + for limit in self.levels[username]: + delay = limit(verb, url) + if delay: + delays.append((delay, limit.error_message)) + + if delays: + delays.sort() + return delays[0] + + return None, None + + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. We + # put this in the class so that subclasses can override the + # default limit parsing. + @staticmethod + def parse_limits(limits): + """ + Convert a string into a list of Limit instances. This + implementation expects a semicolon-separated sequence of + parenthesized groups, where each group contains a + comma-separated sequence consisting of HTTP method, + user-readable URI, a URI reg-exp, an integer number of + requests which can be made, and a unit of measure. Valid + values for the latter are "SECOND", "MINUTE", "HOUR", and + "DAY". + + @return: List of Limit instances. 
+ """ + + # Handle empty limit strings + limits = limits.strip() + if not limits: + return [] + + # Split up the limits by semicolon + result = [] + for group in limits.split(';'): + group = group.strip() + if group[:1] != '(' or group[-1:] != ')': + raise ValueError("Limit rules must be surrounded by " + "parentheses") + group = group[1:-1] + + # Extract the Limit arguments + args = [a.strip() for a in group.split(',')] + if len(args) != 5: + raise ValueError("Limit rules must contain the following " + "arguments: verb, uri, regex, value, unit") + + # Pull out the arguments + verb, uri, regex, value, unit = args + + # Upper-case the verb + verb = verb.upper() + + # Convert value--raises ValueError if it's not integer + value = int(value) + + # Convert unit + unit = unit.upper() + if unit not in Limit.UNIT_MAP: + raise ValueError("Invalid units specified") + unit = Limit.UNIT_MAP[unit] + + # Build a limit + result.append(Limit(verb, uri, regex, value, unit)) + + return result + + +class WsgiLimiter(object): + """ + Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. + + To use, POST ``/`` with JSON data such as:: + + { + "verb" : GET, + "path" : "/servers" + } + + and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds + header containing the number of seconds to wait before the action would + succeed. + """ + + def __init__(self, limits=None): + """ + Initialize the new `WsgiLimiter`. + + @param limits: List of `Limit` objects + """ + self._limiter = Limiter(limits or DEFAULT_LIMITS) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, request): + """ + Handles a call to this application. Returns 204 if the request is + acceptable to the limiter, else a 403 is returned with a relevant + header indicating when the request *will* succeed. 
+ """ + if request.method != "POST": + raise webob.exc.HTTPMethodNotAllowed() + + try: + info = dict(jsonutils.loads(request.body)) + except ValueError: + raise webob.exc.HTTPBadRequest() + + username = request.path_info_pop() + verb = info.get("verb") + path = info.get("path") + + delay, error = self._limiter.check_for_delay(verb, path, username) + + if delay: + headers = {"X-Wait-Seconds": "%.2f" % delay} + return webob.exc.HTTPForbidden(headers=headers, explanation=error) + else: + return webob.exc.HTTPNoContent() + + +class WsgiLimiterProxy(object): + """ + Rate-limit requests based on answers from a remote source. + """ + + def __init__(self, limiter_address): + """ + Initialize the new `WsgiLimiterProxy`. + + @param limiter_address: IP/port combination of where to request limit + """ + self.limiter_address = limiter_address + + def check_for_delay(self, verb, path, username=None): + body = jsonutils.dumps({"verb": verb, "path": path}) + headers = {"Content-Type": "application/json"} + + conn = httplib.HTTPConnection(self.limiter_address) + + if username: + conn.request("POST", "/%s" % (username), body, headers) + else: + conn.request("POST", "/", body, headers) + + resp = conn.getresponse() + + if 200 >= resp.status < 300: + return None, None + + return resp.getheader("X-Wait-Seconds"), resp.read() or None + + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. + # This implementation returns an empty list, since all limit + # decisions are made by a remote server. + @staticmethod + def parse_limits(limits): + """ + Ignore a limits string--simply doesn't apply for the limit + proxy. + + @return: Empty list. 
+ """ + + return [] diff --git a/cinder/api/v1/router.py b/cinder/api/v1/router.py new file mode 100644 index 0000000000..bb361412cd --- /dev/null +++ b/cinder/api/v1/router.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Volume API. +""" + +from cinder.api import extensions +import cinder.api.openstack +from cinder.api.v1 import limits +from cinder.api.v1 import snapshot_metadata +from cinder.api.v1 import snapshots +from cinder.api.v1 import types +from cinder.api.v1 import volume_metadata +from cinder.api.v1 import volumes +from cinder.api import versions +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class APIRouter(cinder.api.openstack.APIRouter): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. 
+ """ + ExtensionManager = extensions.ExtensionManager + + def _setup_routes(self, mapper, ext_mgr): + self.resources['versions'] = versions.create_resource() + mapper.connect("versions", "/", + controller=self.resources['versions'], + action='show') + + mapper.redirect("", "/") + + self.resources['volumes'] = volumes.create_resource(ext_mgr) + mapper.resource("volume", "volumes", + controller=self.resources['volumes'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['types'] = types.create_resource() + mapper.resource("type", "types", + controller=self.resources['types']) + + self.resources['snapshots'] = snapshots.create_resource(ext_mgr) + mapper.resource("snapshot", "snapshots", + controller=self.resources['snapshots'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['snapshot_metadata'] = \ + snapshot_metadata.create_resource() + snapshot_metadata_controller = self.resources['snapshot_metadata'] + + mapper.resource("snapshot_metadata", "metadata", + controller=snapshot_metadata_controller, + parent_resource=dict(member_name='snapshot', + collection_name='snapshots')) + + self.resources['limits'] = limits.create_resource() + mapper.resource("limit", "limits", + controller=self.resources['limits']) + self.resources['volume_metadata'] = \ + volume_metadata.create_resource() + volume_metadata_controller = self.resources['volume_metadata'] + + mapper.resource("volume_metadata", "metadata", + controller=volume_metadata_controller, + parent_resource=dict(member_name='volume', + collection_name='volumes')) + + mapper.connect("metadata", + "/{project_id}/volumes/{volume_id}/metadata", + controller=volume_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) diff --git a/cinder/api/v1/snapshot_metadata.py b/cinder/api/v1/snapshot_metadata.py new file mode 100644 index 0000000000..6322204ff7 --- /dev/null +++ b/cinder/api/v1/snapshot_metadata.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 
shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder import exception +from cinder import volume +from webob import exc + + +class Controller(object): + """ The volume metadata API controller for the OpenStack API """ + + def __init__(self): + self.volume_api = volume.API() + super(Controller, self).__init__() + + def _get_metadata(self, context, snapshot_id): + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + meta = self.volume_api.get_snapshot_metadata(context, snapshot) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + return meta + + @wsgi.serializers(xml=common.MetadataTemplate) + def index(self, req, snapshot_id): + """ Returns the list of metadata for a given snapshot""" + context = req.environ['cinder.context'] + return {'metadata': self._get_metadata(context, snapshot_id)} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def create(self, req, snapshot_id, body): + try: + metadata = body['metadata'] + except (KeyError, TypeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + context = req.environ['cinder.context'] + + new_metadata = self._update_snapshot_metadata(context, + snapshot_id, + metadata, 
+ delete=False) + + return {'metadata': new_metadata} + + @wsgi.serializers(xml=common.MetaItemTemplate) + @wsgi.deserializers(xml=common.MetaItemDeserializer) + def update(self, req, snapshot_id, id, body): + try: + meta_item = body['meta'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + if id not in meta_item: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + + if len(meta_item) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + self._update_snapshot_metadata(context, + snapshot_id, + meta_item, + delete=False) + + return {'meta': meta_item} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def update_all(self, req, snapshot_id, body): + try: + metadata = body['metadata'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + new_metadata = self._update_snapshot_metadata(context, + snapshot_id, + metadata, + delete=True) + + return {'metadata': new_metadata} + + def _update_snapshot_metadata(self, context, + snapshot_id, metadata, + delete=False): + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + return self.volume_api.update_snapshot_metadata(context, + snapshot, + metadata, + delete) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + + except (ValueError, AttributeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + except exception.InvalidVolumeMetadata as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) + + except exception.InvalidVolumeMetadataSize as error: + raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error)) + + 
@wsgi.serializers(xml=common.MetaItemTemplate) + def show(self, req, snapshot_id, id): + """ Return a single metadata item """ + context = req.environ['cinder.context'] + data = self._get_metadata(context, snapshot_id) + + try: + return {'meta': {id: data[id]}} + except KeyError: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + def delete(self, req, snapshot_id, id): + """ Deletes an existing metadata """ + context = req.environ['cinder.context'] + + metadata = self._get_metadata(context, snapshot_id) + + if id not in metadata: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + self.volume_api.delete_snapshot_metadata(context, snapshot, id) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + return webob.Response(status_int=200) + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/cinder/api/v1/snapshots.py b/cinder/api/v1/snapshots.py new file mode 100644 index 0000000000..6dd24e1a70 --- /dev/null +++ b/cinder/api/v1/snapshots.py @@ -0,0 +1,234 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The volumes snapshots api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.api.v1 import volumes +from cinder.api import xmlutil +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import strutils +from cinder import utils +from cinder import volume + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def _translate_snapshot_detail_view(context, snapshot): + """Maps keys for snapshots details view.""" + + d = _translate_snapshot_summary_view(context, snapshot) + + # NOTE(gagupta): No additional data / lookups at the moment + return d + + +def _translate_snapshot_summary_view(context, snapshot): + """Maps keys for snapshots summary view.""" + d = {} + + d['id'] = snapshot['id'] + d['created_at'] = snapshot['created_at'] + d['display_name'] = snapshot['display_name'] + d['display_description'] = snapshot['display_description'] + d['volume_id'] = snapshot['volume_id'] + d['status'] = snapshot['status'] + d['size'] = snapshot['volume_size'] + + if snapshot.get('snapshot_metadata'): + metadata = snapshot.get('snapshot_metadata') + d['metadata'] = dict((item['key'], item['value']) for item in metadata) + # avoid circular ref when vol is a Volume instance + elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'), + dict): + d['metadata'] = snapshot['metadata'] + else: + d['metadata'] = {} + return d + + +def make_snapshot(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('created_at') + elem.set('display_name') + elem.set('display_description') + elem.set('volume_id') + elem.append(common.MetadataTemplate()) + + +class SnapshotTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshot', selector='snapshot') + make_snapshot(root) + return xmlutil.MasterTemplate(root, 1) + + +class 
SnapshotsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshots') + elem = xmlutil.SubTemplateElement(root, 'snapshot', + selector='snapshots') + make_snapshot(elem) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsController(wsgi.Controller): + """The Volumes API controller for the OpenStack API.""" + + def __init__(self, ext_mgr=None): + self.volume_api = volume.API() + self.ext_mgr = ext_mgr + super(SnapshotsController, self).__init__() + + @wsgi.serializers(xml=SnapshotTemplate) + def show(self, req, id): + """Return data about the given snapshot.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get_snapshot(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return {'snapshot': _translate_snapshot_detail_view(context, vol)} + + def delete(self, req, id): + """Delete a snapshot.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete snapshot with id: %s"), id, context=context) + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.delete_snapshot(context, snapshot) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SnapshotsTemplate) + def index(self, req): + """Returns a summary list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_summary_view) + + @wsgi.serializers(xml=SnapshotsTemplate) + def detail(self, req): + """Returns a detailed list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_detail_view) + + def _items(self, req, entity_maker): + """Returns a list of snapshots, transformed through entity_maker.""" + context = req.environ['cinder.context'] + + search_opts = {} + search_opts.update(req.GET) + allowed_search_options = ('status', 'volume_id', 'display_name') + volumes.remove_invalid_options(context, search_opts, + allowed_search_options) + + snapshots = 
self.volume_api.get_all_snapshots(context, + search_opts=search_opts) + limited_list = common.limited(snapshots, req) + res = [entity_maker(context, snapshot) for snapshot in limited_list] + return {'snapshots': res} + + @wsgi.serializers(xml=SnapshotTemplate) + def create(self, req, body): + """Creates a new snapshot.""" + kwargs = {} + context = req.environ['cinder.context'] + + if not self.is_valid_body(body, 'snapshot'): + raise exc.HTTPUnprocessableEntity() + + snapshot = body['snapshot'] + kwargs['metadata'] = snapshot.get('metadata', None) + + volume_id = snapshot['volume_id'] + volume = self.volume_api.get(context, volume_id) + force = snapshot.get('force', False) + msg = _("Create snapshot from volume %s") + LOG.audit(msg, volume_id, context=context) + + if not utils.is_valid_boolstr(force): + msg = _("Invalid value '%s' for force. ") % force + raise exception.InvalidParameterValue(err=msg) + + if strutils.bool_from_string(force): + new_snapshot = self.volume_api.create_snapshot_force( + context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description'), + **kwargs) + else: + new_snapshot = self.volume_api.create_snapshot( + context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description'), + **kwargs) + + retval = _translate_snapshot_detail_view(context, new_snapshot) + + return {'snapshot': retval} + + @wsgi.serializers(xml=SnapshotTemplate) + def update(self, req, id, body): + """Update a snapshot.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + if 'snapshot' not in body: + raise exc.HTTPUnprocessableEntity() + + snapshot = body['snapshot'] + update_dict = {} + + valid_update_keys = ( + 'display_name', + 'display_description', + ) + + for key in valid_update_keys: + if key in snapshot: + update_dict[key] = snapshot[key] + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.update_snapshot(context, snapshot, update_dict) + 
except exception.NotFound: + raise exc.HTTPNotFound() + + snapshot.update(update_dict) + + return {'snapshot': _translate_snapshot_detail_view(context, snapshot)} + + +def create_resource(ext_mgr): + return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v1/types.py b/cinder/api/v1/types.py new file mode 100644 index 0000000000..1513b8dcfa --- /dev/null +++ b/cinder/api/v1/types.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The volume type & volume types extra specs extension.""" + +from webob import exc + +from cinder.api.openstack import wsgi +from cinder.api.views import types as views_types +from cinder.api import xmlutil +from cinder import exception +from cinder.volume import volume_types + + +def make_voltype(elem): + elem.set('id') + elem.set('name') + extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + elem.append(extra_specs) + + +class VolumeTypeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_type', selector='volume_type') + make_voltype(root) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_types') + elem = xmlutil.SubTemplateElement(root, 'volume_type', + selector='volume_types') + make_voltype(elem) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesController(wsgi.Controller): + """The volume types API controller for the OpenStack API.""" + + _view_builder_class = views_types.ViewBuilder + + @wsgi.serializers(xml=VolumeTypesTemplate) + def index(self, req): + """Returns the list of volume types.""" + context = req.environ['cinder.context'] + vol_types = volume_types.get_all_types(context).values() + return self._view_builder.index(req, vol_types) + + @wsgi.serializers(xml=VolumeTypeTemplate) + def show(self, req, id): + """Return a single volume type item.""" + context = req.environ['cinder.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + # TODO(bcwaldon): remove str cast once we use uuids + vol_type['id'] = str(vol_type['id']) + return self._view_builder.show(req, vol_type) + + +def create_resource(): + return wsgi.Resource(VolumeTypesController()) diff --git a/cinder/api/v1/volume_metadata.py b/cinder/api/v1/volume_metadata.py new file mode 100644 index 0000000000..4f6df75b6e --- 
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob

from cinder.api import common
from cinder.api.openstack import wsgi
from cinder import exception
from cinder import volume
from webob import exc


class Controller(object):
    """The volume metadata API controller for the OpenStack API.

    Exposes CRUD operations on the key/value metadata attached to a
    volume.  NOTE(review): the ``_()`` translation function is not
    imported here -- presumably installed as a builtin by cinder's
    gettext setup; confirm against cinder/__init__.py.
    """

    def __init__(self):
        self.volume_api = volume.API()
        super(Controller, self).__init__()

    def _get_metadata(self, context, volume_id):
        """Fetch the metadata dict for a volume, mapping a missing volume
        to HTTP 404 instead of leaking VolumeNotFound to the WSGI layer.
        """
        try:
            volume = self.volume_api.get(context, volume_id)
            meta = self.volume_api.get_volume_metadata(context, volume)
        except exception.VolumeNotFound:
            msg = _('volume does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        return meta

    @wsgi.serializers(xml=common.MetadataTemplate)
    def index(self, req, volume_id):
        """Returns the list of metadata for a given volume."""
        context = req.environ['cinder.context']
        return {'metadata': self._get_metadata(context, volume_id)}

    @wsgi.serializers(xml=common.MetadataTemplate)
    @wsgi.deserializers(xml=common.MetadataDeserializer)
    def create(self, req, volume_id, body):
        """Merge new metadata items into a volume (existing keys kept).

        Raises HTTP 400 when the body is missing the 'metadata' key or is
        not a mapping at all.
        """
        try:
            metadata = body['metadata']
        except (KeyError, TypeError):
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)

        context = req.environ['cinder.context']

        # delete=False: merge semantics -- keys absent from the request
        # are left untouched.
        new_metadata = self._update_volume_metadata(context,
                                                    volume_id,
                                                    metadata,
                                                    delete=False)

        return {'metadata': new_metadata}

    @wsgi.serializers(xml=common.MetaItemTemplate)
    @wsgi.deserializers(xml=common.MetaItemDeserializer)
    def update(self, req, volume_id, id, body):
        """Update a single metadata item.

        The URI id must be the only key present in the request body's
        'meta' mapping; mismatch or extra keys yield HTTP 400.
        """
        try:
            meta_item = body['meta']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)

        if id not in meta_item:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)

        if len(meta_item) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)

        context = req.environ['cinder.context']
        self._update_volume_metadata(context,
                                     volume_id,
                                     meta_item,
                                     delete=False)

        return {'meta': meta_item}

    @wsgi.serializers(xml=common.MetadataTemplate)
    @wsgi.deserializers(xml=common.MetadataDeserializer)
    def update_all(self, req, volume_id, body):
        """Replace the volume's entire metadata set.

        delete=True makes this a full replacement: keys absent from the
        request body are removed from the volume.
        """
        try:
            metadata = body['metadata']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)

        context = req.environ['cinder.context']
        new_metadata = self._update_volume_metadata(context,
                                                    volume_id,
                                                    metadata,
                                                    delete=True)

        return {'metadata': new_metadata}

    def _update_volume_metadata(self, context,
                                volume_id, metadata,
                                delete=False):
        """Apply a metadata change, translating domain exceptions into the
        appropriate HTTP errors (404 missing volume, 400 bad body/metadata,
        413 metadata too large).
        """
        try:
            volume = self.volume_api.get(context, volume_id)
            return self.volume_api.update_volume_metadata(context,
                                                          volume,
                                                          metadata,
                                                          delete)
        except exception.VolumeNotFound:
            msg = _('volume does not exist')
            raise exc.HTTPNotFound(explanation=msg)

        except (ValueError, AttributeError):
            # e.g. metadata is not iterable / not a mapping.
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)

        except exception.InvalidVolumeMetadata as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))

        except exception.InvalidVolumeMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error))

    @wsgi.serializers(xml=common.MetaItemTemplate)
    def show(self, req, volume_id, id):
        """Return a single metadata item; 404 when the key is absent."""
        context = req.environ['cinder.context']
        data = self._get_metadata(context, volume_id)

        try:
            return {'meta': {id: data[id]}}
        except KeyError:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

    def delete(self, req, volume_id, id):
        """Deletes an existing metadata item, returning HTTP 200."""
        context = req.environ['cinder.context']

        metadata = self._get_metadata(context, volume_id)

        if id not in metadata:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

        try:
            volume = self.volume_api.get(context, volume_id)
            self.volume_api.delete_volume_metadata(context, volume, id)
        except exception.VolumeNotFound:
            msg = _('volume does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        return webob.Response(status_int=200)


def create_resource():
    # Factory used by the v1 API router.
    return wsgi.Resource(Controller())
"""The volumes api."""

import webob
from webob import exc

from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
from cinder import utils
from cinder import volume
from cinder.volume import volume_types


LOG = logging.getLogger(__name__)


FLAGS = flags.FLAGS


def _translate_attachment_detail_view(_context, vol):
    """Maps keys for attachment details view."""

    d = _translate_attachment_summary_view(_context, vol)

    # No additional data / lookups at the moment

    return d


def _translate_attachment_summary_view(_context, vol):
    """Maps keys for attachment summary view.

    Builds the API-facing attachment dict from a volume record; 'device'
    is only included when a mountpoint is recorded.
    """
    d = {}

    volume_id = vol['id']

    # NOTE(justinsb): We use the volume id as the id of the attachment object
    d['id'] = volume_id

    d['volume_id'] = volume_id
    d['server_id'] = vol['instance_uuid']
    if vol.get('mountpoint'):
        d['device'] = vol['mountpoint']

    return d


def _translate_volume_detail_view(context, vol, image_id=None):
    """Maps keys for volumes details view."""

    d = _translate_volume_summary_view(context, vol, image_id)

    # No additional data / lookups at the moment

    return d


def _translate_volume_summary_view(context, vol, image_id=None):
    """Maps keys for volumes summary view.

    Converts a volume record (DB model row or plain dict) into the
    API-facing representation used by index/detail/show responses.
    """
    d = {}

    d['id'] = vol['id']
    d['status'] = vol['status']
    d['size'] = vol['size']
    d['availability_zone'] = vol['availability_zone']
    d['created_at'] = vol['created_at']

    d['attachments'] = []
    if vol['attach_status'] == 'attached':
        attachment = _translate_attachment_detail_view(context, vol)
        d['attachments'].append(attachment)

    d['display_name'] = vol['display_name']
    d['display_description'] = vol['display_description']

    if vol['volume_type_id'] and vol.get('volume_type'):
        d['volume_type'] = vol['volume_type']['name']
    else:
        # TODO(bcwaldon): remove str cast once we use uuids
        d['volume_type'] = str(vol['volume_type_id'])

    d['snapshot_id'] = vol['snapshot_id']
    d['source_volid'] = vol['source_volid']

    if image_id:
        d['image_id'] = image_id

    LOG.audit(_("vol=%s"), vol, context=context)

    # Prefer the DB-side volume_metadata rows (list of key/value records);
    # fall back to a plain 'metadata' dict when vol is already a dict.
    if vol.get('volume_metadata'):
        metadata = vol.get('volume_metadata')
        d['metadata'] = dict((item['key'], item['value']) for item in metadata)
    # avoid circular ref when vol is a Volume instance
    elif vol.get('metadata') and isinstance(vol.get('metadata'), dict):
        d['metadata'] = vol['metadata']
    else:
        d['metadata'] = {}

    # Presence of glance metadata marks the volume as bootable; the API
    # historically serializes this flag as the strings 'true'/'false'.
    if vol.get('volume_glance_metadata'):
        d['bootable'] = 'true'
    else:
        d['bootable'] = 'false'

    return d


def make_attachment(elem):
    # Declare the XML attributes emitted for an attachment element.
    elem.set('id')
    elem.set('server_id')
    elem.set('volume_id')
    elem.set('device')


def make_volume(elem):
    # Declare the XML attributes and child elements emitted for a volume.
    elem.set('id')
    elem.set('status')
    elem.set('size')
    elem.set('availability_zone')
    elem.set('created_at')
    elem.set('display_name')
    elem.set('display_description')
    elem.set('volume_type')
    elem.set('snapshot_id')
    elem.set('source_volid')

    attachments = xmlutil.SubTemplateElement(elem, 'attachments')
    attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
                                            selector='attachments')
    make_attachment(attachment)

    # Attach metadata node
    elem.append(common.MetadataTemplate())


volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM}


class VolumeTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single volume response."""

    def construct(self):
        root = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)


class VolumesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list-of-volumes response."""

    def construct(self):
        root = xmlutil.TemplateElement('volumes')
        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
        make_volume(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
    """Common deserializer to handle xml-formatted volume requests.

    Handles standard volume attributes as well as the optional metadata
    attribute.
    """

    metadata_deserializer = common.MetadataXMLDeserializer()

    def _extract_volume(self, node):
        """Marshal the volume attribute of a parsed request.

        Only attributes actually present (non-empty) on the <volume>
        element are copied into the result dict.
        """
        volume = {}
        volume_node = self.find_first_child_named(node, 'volume')

        attributes = ['display_name', 'display_description', 'size',
                      'volume_type', 'availability_zone']
        for attr in attributes:
            if volume_node.getAttribute(attr):
                volume[attr] = volume_node.getAttribute(attr)

        metadata_node = self.find_first_child_named(volume_node, 'metadata')
        if metadata_node is not None:
            volume['metadata'] = self.extract_metadata(metadata_node)

        return volume


class CreateDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted create volume requests.

    Handles standard volume attributes as well as the optional metadata
    attribute.
    """

    def default(self, string):
        """Deserialize an xml-formatted volume create request."""
        dom = utils.safe_minidom_parse_string(string)
        volume = self._extract_volume(dom)
        return {'body': {'volume': volume}}


class VolumeController(wsgi.Controller):
    """The Volumes API controller for the OpenStack API."""

    def __init__(self, ext_mgr):
        self.volume_api = volume.API()
        self.ext_mgr = ext_mgr
        super(VolumeController, self).__init__()

    @wsgi.serializers(xml=VolumeTemplate)
    def show(self, req, id):
        """Return data about the given volume."""
        context = req.environ['cinder.context']

        try:
            vol = self.volume_api.get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        return {'volume': _translate_volume_detail_view(context, vol)}

    def delete(self, req, id):
        """Delete a volume; 202 Accepted on success, 404 when missing."""
        context = req.environ['cinder.context']

        LOG.audit(_("Delete volume with id: %s"), id, context=context)

        try:
            volume = self.volume_api.get(context, id)
            self.volume_api.delete(context, volume)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=VolumesTemplate)
    def index(self, req):
        """Returns a summary list of volumes."""
        return self._items(req, entity_maker=_translate_volume_summary_view)

    @wsgi.serializers(xml=VolumesTemplate)
    def detail(self, req):
        """Returns a detailed list of volumes."""
        return self._items(req, entity_maker=_translate_volume_detail_view)

    def _items(self, req, entity_maker):
        """Returns a list of volumes, transformed through entity_maker.

        Query-string parameters become search filters; options not
        whitelisted for non-admins are stripped first.
        """

        search_opts = {}
        search_opts.update(req.GET)

        context = req.environ['cinder.context']
        remove_invalid_options(context,
                               search_opts, self._get_volume_search_options())

        volumes = self.volume_api.get_all(context, marker=None, limit=None,
                                          sort_key='created_at',
                                          sort_dir='desc', filters=search_opts)
        # Apply the API-level pagination limit after retrieval.
        limited_list = common.limited(volumes, req)
        res = [entity_maker(context, vol) for vol in limited_list]
        return {'volumes': res}

    def _image_uuid_from_href(self, image_href):
        """Extract and validate an image UUID from an imageRef value.

        Accepts either a bare UUID or a nova-style URL ending in the UUID;
        raises HTTP 400 for anything else.
        """
        # If the image href was generated by nova api, strip image_href
        # down to an id.
        try:
            image_uuid = image_href.split('/').pop()
        except (TypeError, AttributeError):
            msg = _("Invalid imageRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)

        if not uuidutils.is_uuid_like(image_uuid):
            msg = _("Invalid imageRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)

        return image_uuid

    @wsgi.serializers(xml=VolumeTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Creates a new volume.

        Resolves the requested volume type (by name or UUID), snapshot,
        source volume and optional image before delegating to the volume
        API.  Size defaults to the snapshot's or source volume's size when
        omitted.
        """
        if not self.is_valid_body(body, 'volume'):
            raise exc.HTTPUnprocessableEntity()

        context = req.environ['cinder.context']
        volume = body['volume']

        kwargs = {}

        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            # Non-UUID values are treated as type names, UUIDs as type ids.
            if not uuidutils.is_uuid_like(req_volume_type):
                try:
                    kwargs['volume_type'] = \
                        volume_types.get_volume_type_by_name(
                            context, req_volume_type)
                except exception.VolumeTypeNotFound:
                    explanation = 'Volume type not found.'
                    raise exc.HTTPNotFound(explanation=explanation)
            else:
                try:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, req_volume_type)
                except exception.VolumeTypeNotFound:
                    explanation = 'Volume type not found.'
                    raise exc.HTTPNotFound(explanation=explanation)

        kwargs['metadata'] = volume.get('metadata', None)

        snapshot_id = volume.get('snapshot_id')
        if snapshot_id is not None:
            kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                              snapshot_id)
        else:
            kwargs['snapshot'] = None

        source_volid = volume.get('source_volid')
        if source_volid is not None:
            kwargs['source_volume'] = self.volume_api.get_volume(context,
                                                                 source_volid)
        else:
            kwargs['source_volume'] = None

        # When no explicit size is given, inherit it from the snapshot or
        # source volume being cloned.
        size = volume.get('size', None)
        if size is None and kwargs['snapshot'] is not None:
            size = kwargs['snapshot']['volume_size']
        elif size is None and kwargs['source_volume'] is not None:
            size = kwargs['source_volume']['size']

        LOG.audit(_("Create volume of %s GB"), size, context=context)

        image_href = None
        image_uuid = None
        # imageRef is only honored when the os-image-create extension is on.
        if self.ext_mgr.is_loaded('os-image-create'):
            image_href = volume.get('imageRef')
            if image_href:
                image_uuid = self._image_uuid_from_href(image_href)
                kwargs['image_id'] = image_uuid

        kwargs['availability_zone'] = volume.get('availability_zone', None)

        new_volume = self.volume_api.create(context,
                                            size,
                                            volume.get('display_name'),
                                            volume.get('display_description'),
                                            **kwargs)

        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        retval = _translate_volume_detail_view(context,
                                               dict(new_volume.iteritems()),
                                               image_uuid)

        return {'volume': retval}

    def _get_volume_search_options(self):
        """Return volume search options allowed by non-admin."""
        return ('display_name', 'status')

    @wsgi.serializers(xml=VolumeTemplate)
    def update(self, req, id, body):
        """Update a volume's display fields and/or metadata.

        Only keys in valid_update_keys are honored; everything else in the
        request body is silently ignored.
        """
        context = req.environ['cinder.context']

        if not body:
            raise exc.HTTPUnprocessableEntity()

        if 'volume' not in body:
            raise exc.HTTPUnprocessableEntity()

        volume = body['volume']
        update_dict = {}

        valid_update_keys = (
            'display_name',
            'display_description',
            'metadata',
        )

        for key in valid_update_keys:
            if key in volume:
                update_dict[key] = volume[key]

        try:
            volume = self.volume_api.get(context, id)
            self.volume_api.update(context, volume, update_dict)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        # Reflect the accepted changes in the response without re-reading
        # the volume from the database.
        volume.update(update_dict)

        return {'volume': _translate_volume_detail_view(context, volume)}


def create_resource(ext_mgr):
    # Factory used by the API router; the extension manager gates optional
    # behavior such as imageRef handling.
    return wsgi.Resource(VolumeController(ext_mgr))


def remove_invalid_options(context, search_options, allowed_search_options):
    """Remove search options that are not valid for non-admin API/context."""
    if context.is_admin:
        # Allow all options
        return
    # Otherwise, strip out all unknown options
    unknown_options = [opt for opt in search_options
                       if opt not in allowed_search_options]
    bad_options = ", ".join(unknown_options)
    log_msg = _("Removing options '%(bad_options)s' from query") % locals()
    LOG.debug(log_msg)
    for opt in unknown_options:
        del search_options[opt]
"""
Module with functions/classes dedicated to rate limiting requests.
"""

import collections
import copy
import httplib
import math
import re
import time

import webob.dec
import webob.exc

from cinder.api.openstack import wsgi
from cinder.api.views import limits as limits_views
from cinder.api import xmlutil
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder import quota
from cinder import wsgi as base_wsgi

QUOTAS = quota.QUOTAS


# Convenience constants for the limits dictionary passed to Limiter().
PER_SECOND = 1
PER_MINUTE = 60
PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24


limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}


class LimitsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the /limits response (rate limits
    plus absolute quota limits)."""

    def construct(self):
        root = xmlutil.TemplateElement('limits', selector='limits')

        rates = xmlutil.SubTemplateElement(root, 'rates')
        rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
        rate.set('uri', 'uri')
        rate.set('regex', 'regex')
        limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
        limit.set('value', 'value')
        limit.set('verb', 'verb')
        limit.set('remaining', 'remaining')
        limit.set('unit', 'unit')
        limit.set('next-available', 'next-available')

        absolute = xmlutil.SubTemplateElement(root, 'absolute',
                                              selector='absolute')
        limit = xmlutil.SubTemplateElement(absolute, 'limit',
                                           selector=xmlutil.get_items)
        limit.set('name', 0)
        limit.set('value', 1)

        return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)


class LimitsController(object):
    """
    Controller for accessing limits in the OpenStack API.
    """

    @wsgi.serializers(xml=LimitsTemplate)
    def index(self, req):
        """
        Return all global and rate limit information.

        Absolute limits come from the quota engine; rate limits are the
        ones the rate-limiting middleware stored in the WSGI environ.
        """
        context = req.environ['cinder.context']
        quotas = QUOTAS.get_project_quotas(context, context.project_id,
                                           usages=False)
        abs_limits = dict((k, v['limit']) for k, v in quotas.items())
        rate_limits = req.environ.get("cinder.limits", [])

        builder = self._get_view_builder(req)
        return builder.build(rate_limits, abs_limits)

    def _get_view_builder(self, req):
        return limits_views.ViewBuilder()


def create_resource():
    # Factory used by the API router.
    return wsgi.Resource(LimitsController())
class Limit(object):
    """
    Stores information about a limit for HTTP requests.
    """

    # Seconds-per-unit -> display name.
    UNITS = {
        1: "SECOND",
        60: "MINUTE",
        60 * 60: "HOUR",
        60 * 60 * 24: "DAY",
    }

    # Display name -> seconds-per-unit (inverse of UNITS).
    UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])

    def __init__(self, verb, uri, regex, value, unit):
        """
        Initialize a new `Limit`.

        @param verb: HTTP verb (POST, PUT, etc.)
        @param uri: Human-readable URI
        @param regex: Regular expression format for this limit
        @param value: Integer number of requests which can be made
        @param unit: Unit of measure for the value parameter
        """
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = int(value)
        self.unit = unit
        self.unit_string = self.display_unit().lower()
        self.remaining = int(value)

        if value <= 0:
            raise ValueError("Limit value must be > 0")

        self.last_request = None
        self.next_request = None

        # Leaky-bucket accounting: the bucket holds `unit` seconds of
        # "water"; each request adds capacity/value seconds, and water
        # drains at one second per second of wall-clock time.
        self.water_level = 0
        self.capacity = self.unit
        self.request_value = float(self.capacity) / float(self.value)
        msg = _("Only %(value)s %(verb)s request(s) can be "
                "made to %(uri)s every %(unit_string)s.")
        self.error_message = msg % self.__dict__

    def __call__(self, verb, url):
        """
        Represents a call to this limit from a relevant request.

        Returns None when the request is allowed (and updates the
        remaining count), or the number of seconds to wait when the
        bucket is over capacity.

        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        """
        if self.verb != verb or not re.match(self.regex, url):
            return

        now = self._get_time()

        if self.last_request is None:
            self.last_request = now

        # Drain the bucket by the elapsed time since the last request.
        leak_value = now - self.last_request

        self.water_level -= leak_value
        self.water_level = max(self.water_level, 0)
        self.water_level += self.request_value

        difference = self.water_level - self.capacity

        self.last_request = now

        if difference > 0:
            # Over capacity: reject, and report how long until it drains.
            self.water_level -= self.request_value
            self.next_request = now + difference
            return difference

        cap = self.capacity
        water = self.water_level
        val = self.value

        self.remaining = math.floor(((cap - water) / cap) * val)
        self.next_request = now

    def _get_time(self):
        """Retrieve the current time. Broken out for testability."""
        return time.time()

    def display_unit(self):
        """Display the string name of the unit."""
        return self.UNITS.get(self.unit, "UNKNOWN")

    def display(self):
        """Return a useful representation of this class."""
        return {
            "verb": self.verb,
            "URI": self.uri,
            "regex": self.regex,
            "value": self.value,
            "remaining": int(self.remaining),
            "unit": self.display_unit(),
            "resetTime": int(self.next_request or self._get_time()),
        }

# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)

DEFAULT_LIMITS = [
    Limit("POST", "*", ".*", 10, PER_MINUTE),
    Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
    Limit("PUT", "*", ".*", 10, PER_MINUTE),
    Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
    Limit("DELETE", "*", ".*", 100, PER_MINUTE),
]


class RateLimitingMiddleware(base_wsgi.Middleware):
    """
    Rate-limits requests passing through this middleware. All limit
    information is stored in memory for this implementation.
    """

    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """
        Initialize new `RateLimitingMiddleware`, which wraps the given WSGI
        application and sets up the given limits.

        @param application: WSGI application to wrap
        @param limits: String describing limits
        @param limiter: String identifying class for representing limits

        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)

        # Select the limiter class
        if limiter is None:
            limiter = Limiter
        else:
            limiter = importutils.import_class(limiter)

        # Parse the limits, if any are provided
        if limits is not None:
            limits = limiter.parse_limits(limits)

        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """
        Represents a single call through this middleware. We should record the
        request if we have a limit relevant to it. If no limit is relevant to
        the request, ignore it.

        If the request should be rate limited, return a fault telling the user
        they are over the limit and need to retry later.
        """
        verb = req.method
        url = req.url
        context = req.environ.get("cinder.context")

        # Per-user limits keyed by user_id when a context is present.
        if context:
            username = context.user_id
        else:
            username = None

        delay, error = self._limiter.check_for_delay(verb, url, username)

        if delay:
            msg = _("This request was rate-limited.")
            retry = time.time() + delay
            return wsgi.OverLimitFault(msg, error, retry)

        # Expose current limits so the /limits controller can report them.
        req.environ["cinder.limits"] = self._limiter.get_limits(username)

        return self.application


class Limiter(object):
    """
    Rate-limit checking class which handles limits in memory.
    """

    def __init__(self, limits, **kwargs):
        """
        Initialize the new `Limiter`.

        @param limits: List of `Limit` objects
        """
        self.limits = copy.deepcopy(limits)
        # Each user gets an independent deep copy of the limits so their
        # bucket state is tracked separately.
        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))

        # Pick up any per-user limit information
        for key, value in kwargs.items():
            if key.startswith('user:'):
                username = key[5:]
                self.levels[username] = self.parse_limits(value)

    def get_limits(self, username=None):
        """
        Return the limits for a given user.
        """
        return [limit.display() for limit in self.levels[username]]

    def check_for_delay(self, verb, url, username=None):
        """
        Check the given verb/url/username triple against the user's limits.

        @return: Tuple of delay (in seconds) and error message (or None, None)
        """
        delays = []

        for limit in self.levels[username]:
            delay = limit(verb, url)
            if delay:
                delays.append((delay, limit.error_message))

        if delays:
            # Report the shortest delay that would satisfy every limit.
            delays.sort()
            return delays[0]

        return None, None

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor. We
    # put this in the class so that subclasses can override the
    # default limit parsing.
    @staticmethod
    def parse_limits(limits):
        """
        Convert a string into a list of Limit instances. This
        implementation expects a semicolon-separated sequence of
        parenthesized groups, where each group contains a
        comma-separated sequence consisting of HTTP method,
        user-readable URI, a URI reg-exp, an integer number of
        requests which can be made, and a unit of measure. Valid
        values for the latter are "SECOND", "MINUTE", "HOUR", and
        "DAY".

        @return: List of Limit instances.
        """

        # Handle empty limit strings
        limits = limits.strip()
        if not limits:
            return []

        # Split up the limits by semicolon
        result = []
        for group in limits.split(';'):
            group = group.strip()
            if group[:1] != '(' or group[-1:] != ')':
                raise ValueError("Limit rules must be surrounded by "
                                 "parentheses")
            group = group[1:-1]

            # Extract the Limit arguments
            args = [a.strip() for a in group.split(',')]
            if len(args) != 5:
                raise ValueError("Limit rules must contain the following "
                                 "arguments: verb, uri, regex, value, unit")

            # Pull out the arguments
            verb, uri, regex, value, unit = args

            # Upper-case the verb
            verb = verb.upper()

            # Convert value--raises ValueError if it's not integer
            value = int(value)

            # Convert unit
            unit = unit.upper()
            if unit not in Limit.UNIT_MAP:
                raise ValueError("Invalid units specified")
            unit = Limit.UNIT_MAP[unit]

            # Build a limit
            result.append(Limit(verb, uri, regex, value, unit))

        return result


class WsgiLimiter(object):
    """
    Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.

    To use, POST ``/`` with JSON data such as::

        {
            "verb" : GET,
            "path" : "/servers"
        }

    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
    header containing the number of seconds to wait before the action would
    succeed.
    """

    def __init__(self, limits=None):
        """
        Initialize the new `WsgiLimiter`.

        @param limits: List of `Limit` objects
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, request):
        """
        Handles a call to this application. Returns 204 if the request is
        acceptable to the limiter, else a 403 is returned with a relevant
        header indicating when the request *will* succeed.
        """
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()

        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()

        # The first path segment names the user whose limits apply.
        username = request.path_info_pop()
        verb = info.get("verb")
        path = info.get("path")

        delay, error = self._limiter.check_for_delay(verb, path, username)

        if delay:
            headers = {"X-Wait-Seconds": "%.2f" % delay}
            return webob.exc.HTTPForbidden(headers=headers, explanation=error)
        else:
            return webob.exc.HTTPNoContent()
class WsgiLimiterProxy(object):
    """
    Rate-limit requests based on answers from a remote source.
    """

    def __init__(self, limiter_address):
        """
        Initialize the new `WsgiLimiterProxy`.

        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address

    def check_for_delay(self, verb, path, username=None):
        """
        Ask the remote limiter service whether this request is allowed.

        @param verb: HTTP verb of the request being checked
        @param path: request path being checked
        @param username: optional user whose limits apply

        @return: Tuple of (None, None) when allowed, or
                 (X-Wait-Seconds header value, response body or None)
                 when the request must be delayed.
        """
        body = jsonutils.dumps({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}

        conn = httplib.HTTPConnection(self.limiter_address)

        if username:
            conn.request("POST", "/%s" % (username), body, headers)
        else:
            conn.request("POST", "/", body, headers)

        resp = conn.getresponse()

        # Any 2xx status means the remote limiter accepted the request.
        # NOTE: the previous check was ``200 >= resp.status < 300``, which
        # Python chains as ``200 >= resp.status and resp.status < 300`` --
        # i.e. it only accepted statuses <= 200 and wrongly treated other
        # 2xx responses (such as the limiter's 204 No Content) as denials.
        if 200 <= resp.status < 300:
            return None, None

        return resp.getheader("X-Wait-Seconds"), resp.read() or None

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """
        Ignore a limits string--simply doesn't apply for the limit
        proxy.

        @return: Empty list.
        """

        return []
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
WSGI middleware for OpenStack Volume API.
"""

from cinder.api import extensions
import cinder.api.openstack
from cinder.api.v2 import limits
from cinder.api.v2 import snapshots
from cinder.api.v2 import types
from cinder.api.v2 import volumes
from cinder.api import versions
from cinder.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class APIRouter(cinder.api.openstack.APIRouter):
    """
    Routes requests on the OpenStack API to the appropriate controller
    and method.
    """
    # Extension manager class used by the base router to load API
    # extensions for this (v2) API version.
    ExtensionManager = extensions.ExtensionManager

    def _setup_routes(self, mapper, ext_mgr):
        """Wire the core v2 resources (versions, volumes, types,
        snapshots, limits) onto standard REST routes.
        """
        self.resources['versions'] = versions.create_resource()
        mapper.connect("versions", "/",
                       controller=self.resources['versions'],
                       action='show')

        mapper.redirect("", "/")

        self.resources['volumes'] = volumes.create_resource(ext_mgr)
        # collection={'detail': 'GET'} adds /volumes/detail; the 'action'
        # member enables POST /volumes/{id}/action for extensions.
        mapper.resource("volume", "volumes",
                        controller=self.resources['volumes'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})

        self.resources['types'] = types.create_resource()
        mapper.resource("type", "types",
                        controller=self.resources['types'])

        self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
        mapper.resource("snapshot", "snapshots",
                        controller=self.resources['snapshots'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})

        self.resources['limits'] = limits.create_resource()
        mapper.resource("limit", "limits",
                        controller=self.resources['limits'])
import webob

from cinder.api import common
from cinder.api.openstack import wsgi
from cinder import exception
from cinder import volume
from webob import exc


class Controller(object):
    """The snapshot metadata API controller for the OpenStack API.

    Exposes CRUD operations on the key/value metadata attached to a
    snapshot.  NOTE(review): reuses the volume metadata exception types
    (InvalidVolumeMetadata*) for error mapping.
    """

    def __init__(self):
        self.volume_api = volume.API()
        super(Controller, self).__init__()

    def _get_metadata(self, context, snapshot_id):
        """Fetch the metadata dict for a snapshot, mapping a missing
        snapshot to HTTP 404.
        """
        try:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            meta = self.volume_api.get_snapshot_metadata(context, snapshot)
        except exception.SnapshotNotFound:
            msg = _('snapshot does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        return meta

    @wsgi.serializers(xml=common.MetadataTemplate)
    def index(self, req, snapshot_id):
        """Returns the list of metadata for a given snapshot."""
        context = req.environ['cinder.context']
        return {'metadata': self._get_metadata(context, snapshot_id)}

    @wsgi.serializers(xml=common.MetadataTemplate)
    @wsgi.deserializers(xml=common.MetadataDeserializer)
    def create(self, req, snapshot_id, body):
        """Merge new metadata items into a snapshot (existing keys kept)."""
        try:
            metadata = body['metadata']
        except (KeyError, TypeError):
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)

        context = req.environ['cinder.context']

        # delete=False: merge semantics -- keys absent from the request
        # are left untouched.
        new_metadata = self._update_snapshot_metadata(context,
                                                      snapshot_id,
                                                      metadata,
                                                      delete=False)

        return {'metadata': new_metadata}

    @wsgi.serializers(xml=common.MetaItemTemplate)
    @wsgi.deserializers(xml=common.MetaItemDeserializer)
    def update(self, req, snapshot_id, id, body):
        """Update a single metadata item.

        The URI id must be the only key present in the request body's
        'meta' mapping; mismatch or extra keys yield HTTP 400.
        """
        try:
            meta_item = body['meta']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)

        if id not in meta_item:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)

        if len(meta_item) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)

        context = req.environ['cinder.context']
        self._update_snapshot_metadata(context,
                                       snapshot_id,
                                       meta_item,
                                       delete=False)

        return {'meta': meta_item}

    @wsgi.serializers(xml=common.MetadataTemplate)
    @wsgi.deserializers(xml=common.MetadataDeserializer)
    def update_all(self, req, snapshot_id, body):
        """Replace the snapshot's entire metadata set.

        delete=True makes this a full replacement: keys absent from the
        request body are removed from the snapshot.
        """
        try:
            metadata = body['metadata']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)

        context = req.environ['cinder.context']
        new_metadata = self._update_snapshot_metadata(context,
                                                      snapshot_id,
                                                      metadata,
                                                      delete=True)

        return {'metadata': new_metadata}

    def _update_snapshot_metadata(self, context,
                                  snapshot_id, metadata,
                                  delete=False):
        """Apply a metadata change, translating domain exceptions into the
        appropriate HTTP errors (404 missing snapshot, 400 bad
        body/metadata, 413 metadata too large).
        """
        try:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            return self.volume_api.update_snapshot_metadata(context,
                                                            snapshot,
                                                            metadata,
                                                            delete)
        except exception.SnapshotNotFound:
            msg = _('snapshot does not exist')
            raise exc.HTTPNotFound(explanation=msg)

        except (ValueError, AttributeError):
            # e.g. metadata is not iterable / not a mapping.
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)

        except exception.InvalidVolumeMetadata as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))

        except exception.InvalidVolumeMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error))

    @wsgi.serializers(xml=common.MetaItemTemplate)
    def show(self, req, snapshot_id, id):
        """Return a single metadata item; 404 when the key is absent."""
        context = req.environ['cinder.context']
        data = self._get_metadata(context, snapshot_id)

        try:
            return {'meta': {id: data[id]}}
        except KeyError:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

    def delete(self, req, snapshot_id, id):
        """Deletes an existing metadata item, returning HTTP 200."""
        context = req.environ['cinder.context']

        metadata = self._get_metadata(context, snapshot_id)

        if id not in metadata:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

        try:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            self.volume_api.delete_snapshot_metadata(context, snapshot, id)
        except exception.SnapshotNotFound:
            msg = _('snapshot does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        return webob.Response(status_int=200)


def create_resource():
    # Factory used by the v2 API router.
    return wsgi.Resource(Controller())
+ +"""The volumes snapshots api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.api.v2 import volumes +from cinder.api import xmlutil +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import strutils +from cinder import utils +from cinder import volume + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def _translate_snapshot_detail_view(context, snapshot): + """Maps keys for snapshots details view.""" + + d = _translate_snapshot_summary_view(context, snapshot) + + # NOTE(gagupta): No additional data / lookups at the moment + return d + + +def _translate_snapshot_summary_view(context, snapshot): + """Maps keys for snapshots summary view.""" + d = {} + + d['id'] = snapshot['id'] + d['created_at'] = snapshot['created_at'] + d['name'] = snapshot['display_name'] + d['description'] = snapshot['display_description'] + d['volume_id'] = snapshot['volume_id'] + d['status'] = snapshot['status'] + d['size'] = snapshot['volume_size'] + + if snapshot.get('snapshot_metadata'): + metadata = snapshot.get('snapshot_metadata') + d['metadata'] = dict((item['key'], item['value']) for item in metadata) + # avoid circular ref when vol is a Volume instance + elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'), + dict): + d['metadata'] = snapshot['metadata'] + else: + d['metadata'] = {} + return d + + +def make_snapshot(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('created_at') + elem.set('name') + elem.set('description') + elem.set('volume_id') + elem.append(common.MetadataTemplate()) + + +class SnapshotTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshot', selector='snapshot') + make_snapshot(root) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsTemplate(xmlutil.TemplateBuilder): + def 
construct(self): + root = xmlutil.TemplateElement('snapshots') + elem = xmlutil.SubTemplateElement(root, 'snapshot', + selector='snapshots') + make_snapshot(elem) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsController(wsgi.Controller): + """The Volumes API controller for the OpenStack API.""" + + def __init__(self, ext_mgr=None): + self.volume_api = volume.API() + self.ext_mgr = ext_mgr + super(SnapshotsController, self).__init__() + + @wsgi.serializers(xml=SnapshotTemplate) + def show(self, req, id): + """Return data about the given snapshot.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get_snapshot(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return {'snapshot': _translate_snapshot_detail_view(context, vol)} + + def delete(self, req, id): + """Delete a snapshot.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete snapshot with id: %s"), id, context=context) + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.delete_snapshot(context, snapshot) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SnapshotsTemplate) + def index(self, req): + """Returns a summary list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_summary_view) + + @wsgi.serializers(xml=SnapshotsTemplate) + def detail(self, req): + """Returns a detailed list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_detail_view) + + def _items(self, req, entity_maker): + """Returns a list of snapshots, transformed through entity_maker.""" + context = req.environ['cinder.context'] + + search_opts = {} + search_opts.update(req.GET) + allowed_search_options = ('status', 'volume_id', 'name') + volumes.remove_invalid_options(context, search_opts, + allowed_search_options) + + # NOTE(thingee): v2 API allows name instead of display_name + if 'name' in search_opts: + 
search_opts['display_name'] = search_opts['name'] + del search_opts['name'] + + snapshots = self.volume_api.get_all_snapshots(context, + search_opts=search_opts) + limited_list = common.limited(snapshots, req) + res = [entity_maker(context, snapshot) for snapshot in limited_list] + return {'snapshots': res} + + @wsgi.response(202) + @wsgi.serializers(xml=SnapshotTemplate) + def create(self, req, body): + """Creates a new snapshot.""" + kwargs = {} + context = req.environ['cinder.context'] + + if not self.is_valid_body(body, 'snapshot'): + raise exc.HTTPBadRequest() + + snapshot = body['snapshot'] + kwargs['metadata'] = snapshot.get('metadata', None) + + volume_id = snapshot['volume_id'] + volume = self.volume_api.get(context, volume_id) + force = snapshot.get('force', False) + msg = _("Create snapshot from volume %s") + LOG.audit(msg, volume_id, context=context) + + # NOTE(thingee): v2 API allows name instead of display_name + if 'name' in snapshot: + snapshot['display_name'] = snapshot.get('name') + del snapshot['name'] + + if not utils.is_valid_boolstr(force): + msg = _("Invalid value '%s' for force. 
") % force + raise exception.InvalidParameterValue(err=msg) + + if strutils.bool_from_string(force): + new_snapshot = self.volume_api.create_snapshot_force( + context, + volume, + snapshot.get('display_name'), + snapshot.get('description'), + **kwargs) + else: + new_snapshot = self.volume_api.create_snapshot( + context, + volume, + snapshot.get('display_name'), + snapshot.get('description'), + **kwargs) + + retval = _translate_snapshot_detail_view(context, new_snapshot) + + return {'snapshot': retval} + + @wsgi.serializers(xml=SnapshotTemplate) + def update(self, req, id, body): + """Update a snapshot.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPBadRequest() + + if 'snapshot' not in body: + raise exc.HTTPBadRequest() + + snapshot = body['snapshot'] + update_dict = {} + + valid_update_keys = ( + 'name', + 'description', + 'display_description', + ) + + # NOTE(thingee): v2 API allows description instead of + # display_description + if 'description' in snapshot: + snapshot['display_description'] = snapshot['description'] + del snapshot['description'] + + for key in valid_update_keys: + if key in snapshot: + update_dict[key] = snapshot[key] + + # NOTE(thingee): v2 API allows name instead of display_name + if 'name' in update_dict: + update_dict['display_name'] = update_dict['name'] + del update_dict['name'] + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.update_snapshot(context, snapshot, update_dict) + except exception.NotFound: + raise exc.HTTPNotFound() + + snapshot.update(update_dict) + + return {'snapshot': _translate_snapshot_detail_view(context, snapshot)} + + +def create_resource(ext_mgr): + return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v2/types.py b/cinder/api/v2/types.py new file mode 100644 index 0000000000..1513b8dcfa --- /dev/null +++ b/cinder/api/v2/types.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage 
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The volume type & volume types extra specs extension."""

from webob import exc

from cinder.api.openstack import wsgi
from cinder.api.views import types as views_types
from cinder.api import xmlutil
from cinder import exception
from cinder.volume import volume_types


def make_voltype(elem):
    # Attributes serialized on each <volume_type> XML element; extra_specs
    # is rendered as a flattened key/value dictionary.
    elem.set('id')
    elem.set('name')
    extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
    elem.append(extra_specs)


class VolumeTypeTemplate(xmlutil.TemplateBuilder):
    """XML template for a single volume type response."""

    def construct(self):
        root = xmlutil.TemplateElement('volume_type', selector='volume_type')
        make_voltype(root)
        return xmlutil.MasterTemplate(root, 1)


class VolumeTypesTemplate(xmlutil.TemplateBuilder):
    """XML template for a list-of-volume-types response."""

    def construct(self):
        root = xmlutil.TemplateElement('volume_types')
        elem = xmlutil.SubTemplateElement(root, 'volume_type',
                                          selector='volume_types')
        make_voltype(elem)
        return xmlutil.MasterTemplate(root, 1)


class VolumeTypesController(wsgi.Controller):
    """The volume types API controller for the OpenStack API."""

    _view_builder_class = views_types.ViewBuilder

    @wsgi.serializers(xml=VolumeTypesTemplate)
    def index(self, req):
        """Returns the list of volume types."""
        context = req.environ['cinder.context']
        vol_types = volume_types.get_all_types(context).values()
        return self._view_builder.index(req, vol_types)

    @wsgi.serializers(xml=VolumeTypeTemplate)
    def show(self, req, id):
        """Return a single volume type item."""
        context = req.environ['cinder.context']

        try:
            vol_type = volume_types.get_volume_type(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        # TODO(bcwaldon): remove str cast once we use uuids
        vol_type['id'] = str(vol_type['id'])
        return self._view_builder.show(req, vol_type)


def create_resource():
    return wsgi.Resource(VolumeTypesController())
from cinder.api import common
from cinder.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = "volumes"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, volumes):
        """Show a list of volumes without many details."""
        return self._list_view(self.summary, request, volumes)

    def detail_list(self, request, volumes):
        """Detailed view of a list of volumes."""
        return self._list_view(self.detail, request, volumes)

    def summary(self, request, volume):
        """Generic, non-detailed view of an volume."""
        links = self._get_links(request, volume['id'])
        summary_view = {
            'id': volume['id'],
            'name': volume['display_name'],
            'links': links,
        }
        return {'volume': summary_view}

    def detail(self, request, volume):
        """Detailed view of a single volume."""
        detail_view = {
            'id': volume.get('id'),
            'status': volume.get('status'),
            'size': volume.get('size'),
            'availability_zone': volume.get('availability_zone'),
            'created_at': volume.get('created_at'),
            'attachments': self._get_attachments(volume),
            'name': volume.get('display_name'),
            'description': volume.get('display_description'),
            'volume_type': self._get_volume_type(volume),
            'snapshot_id': volume.get('snapshot_id'),
            'source_volid': volume.get('source_volid'),
            'metadata': self._get_volume_metadata(volume),
            'links': self._get_links(request, volume['id']),
        }
        return {'volume': detail_view}

    def _get_attachments(self, volume):
        """Retrieves the attachments of the volume object"""
        if volume['attach_status'] != 'attached':
            return []

        # note(justinsb): we use the volume id as the id of the attachments
        # object
        vol_id = volume['id']
        attachment = {
            'id': vol_id,
            'volume_id': vol_id,
            'server_id': volume['instance_uuid'],
        }
        if volume.get('mountpoint'):
            attachment['device'] = volume['mountpoint']
        return [attachment]

    def _get_volume_metadata(self, volume):
        """Retrieves the metadata of the volume object"""
        items = volume.get('volume_metadata')
        if items:
            return dict((entry['key'], entry['value']) for entry in items)
        # avoid circular ref when vol is a Volume instance
        meta = volume.get('metadata')
        if meta and isinstance(meta, dict):
            return meta
        return {}

    def _get_volume_type(self, volume):
        """Retrieves the type the volume object is"""
        if volume['volume_type_id'] and volume.get('volume_type'):
            return volume['volume_type']['name']
        return volume['volume_type_id']

    def _list_view(self, func, request, volumes):
        """Provide a view for a list of volumes."""
        entries = [func(request, vol)['volume'] for vol in volumes]
        links = self._get_collection_links(request,
                                           volumes,
                                           self._collection_name)
        response = dict(volumes=entries)

        if links:
            response['volumes_links'] = links

        return response
"""The volumes api."""

import webob
from webob import exc

from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v2.views import volumes as volume_views
from cinder.api import xmlutil
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
from cinder import utils
from cinder import volume
from cinder.volume import volume_types


LOG = logging.getLogger(__name__)


FLAGS = flags.FLAGS


def make_attachment(elem):
    # Attributes serialized on each <attachment> XML element.
    elem.set('id')
    elem.set('server_id')
    elem.set('volume_id')
    elem.set('device')


def make_volume(elem):
    # Attributes serialized on each <volume> XML element.
    elem.set('id')
    elem.set('status')
    elem.set('size')
    elem.set('availability_zone')
    elem.set('created_at')
    elem.set('name')
    elem.set('description')
    elem.set('volume_type')
    elem.set('snapshot_id')
    elem.set('source_volid')

    attachments = xmlutil.SubTemplateElement(elem, 'attachments')
    attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
                                            selector='attachments')
    make_attachment(attachment)

    # Attach metadata node
    elem.append(common.MetadataTemplate())


volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V2, 'atom': xmlutil.XMLNS_ATOM}


class VolumeTemplate(xmlutil.TemplateBuilder):
    """XML template for a single volume response."""

    def construct(self):
        root = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)


class VolumesTemplate(xmlutil.TemplateBuilder):
    """XML template for a list-of-volumes response."""

    def construct(self):
        root = xmlutil.TemplateElement('volumes')
        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
        make_volume(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)


class CommonDeserializer(wsgi.MetadataXMLDeserializer):
    """Common deserializer to handle xml-formatted volume requests.

    Handles standard volume attributes as well as the optional metadata
    attribute
    """

    metadata_deserializer = common.MetadataXMLDeserializer()

    def _extract_volume(self, node):
        """Marshal the volume attribute of a parsed request."""
        volume = {}
        volume_node = self.find_first_child_named(node, 'volume')

        attributes = ['name', 'description', 'size',
                      'volume_type', 'availability_zone']
        for attr in attributes:
            if volume_node.getAttribute(attr):
                volume[attr] = volume_node.getAttribute(attr)

        metadata_node = self.find_first_child_named(volume_node, 'metadata')
        if metadata_node is not None:
            volume['metadata'] = self.extract_metadata(metadata_node)

        return volume


class CreateDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted create volume requests.

    Handles standard volume attributes as well as the optional metadata
    attribute
    """

    def default(self, string):
        """Deserialize an xml-formatted volume create request."""
        dom = utils.safe_minidom_parse_string(string)
        volume = self._extract_volume(dom)
        return {'body': {'volume': volume}}


class VolumeController(wsgi.Controller):
    """The Volumes API controller for the OpenStack API."""

    _view_builder_class = volume_views.ViewBuilder

    def __init__(self, ext_mgr):
        self.volume_api = volume.API()
        self.ext_mgr = ext_mgr
        super(VolumeController, self).__init__()

    @wsgi.serializers(xml=VolumeTemplate)
    def show(self, req, id):
        """Return data about the given volume."""
        context = req.environ['cinder.context']

        try:
            vol = self.volume_api.get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        return self._view_builder.detail(req, vol)

    def delete(self, req, id):
        """Delete a volume."""
        context = req.environ['cinder.context']

        LOG.audit(_("Delete volume with id: %s"), id, context=context)

        try:
            volume = self.volume_api.get(context, id)
            self.volume_api.delete(context, volume)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        # 202: deletion is asynchronous.
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=VolumesTemplate)
    def index(self, req):
        """Returns a summary list of volumes."""
        return self._get_volumes(req, is_detail=False)

    @wsgi.serializers(xml=VolumesTemplate)
    def detail(self, req):
        """Returns a detailed list of volumes."""
        return self._get_volumes(req, is_detail=True)

    def _get_volumes(self, req, is_detail):
        """Returns a list of volumes, transformed through view builder."""

        context = req.environ['cinder.context']

        # Pull paging/sorting parameters out before treating the remaining
        # query-string entries as filters.
        params = req.params.copy()
        marker = params.pop('marker', None)
        limit = params.pop('limit', None)
        sort_key = params.pop('sort_key', 'created_at')
        sort_dir = params.pop('sort_dir', 'desc')
        params.pop('offset', None)
        filters = params

        remove_invalid_options(context,
                               filters, self._get_volume_filter_options())

        # NOTE(thingee): v2 API allows name instead of display_name
        if 'name' in filters:
            filters['display_name'] = filters['name']
            del filters['name']

        volumes = self.volume_api.get_all(context, marker, limit, sort_key,
                                          sort_dir, filters)
        limited_list = common.limited(volumes, req)

        if is_detail:
            volumes = self._view_builder.detail_list(req, limited_list)
        else:
            volumes = self._view_builder.summary_list(req, limited_list)
        return volumes

    def _image_uuid_from_href(self, image_href):
        # If the image href was generated by nova api, strip image_href
        # down to an id.
        try:
            image_uuid = image_href.split('/').pop()
        except (TypeError, AttributeError):
            msg = _("Invalid imageRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)

        if not uuidutils.is_uuid_like(image_uuid):
            msg = _("Invalid imageRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)

        return image_uuid

    @wsgi.response(202)
    @wsgi.serializers(xml=VolumeTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Creates a new volume.

        Size may be inferred from a source snapshot or source volume when
        not given explicitly.
        """
        if not self.is_valid_body(body, 'volume'):
            raise exc.HTTPBadRequest()

        context = req.environ['cinder.context']
        volume = body['volume']

        kwargs = {}

        # NOTE(thingee): v2 API allows name instead of display_name
        if volume.get('name'):
            volume['display_name'] = volume.get('name')
            del volume['name']

        # NOTE(thingee): v2 API allows description instead of
        # display_description
        if volume.get('description'):
            volume['display_description'] = volume.get('description')
            del volume['description']

        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            try:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, req_volume_type)
            except exception.VolumeTypeNotFound:
                explanation = 'Volume type not found.'
                raise exc.HTTPNotFound(explanation=explanation)

        kwargs['metadata'] = volume.get('metadata', None)

        snapshot_id = volume.get('snapshot_id')
        if snapshot_id is not None:
            kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                              snapshot_id)
        else:
            kwargs['snapshot'] = None

        source_volid = volume.get('source_volid')
        if source_volid is not None:
            kwargs['source_volume'] = self.volume_api.get_volume(context,
                                                                 source_volid)
        else:
            kwargs['source_volume'] = None

        # Fall back to the source snapshot/volume size when none is given.
        size = volume.get('size', None)
        if size is None and kwargs['snapshot'] is not None:
            size = kwargs['snapshot']['volume_size']
        elif size is None and kwargs['source_volume'] is not None:
            size = kwargs['source_volume']['size']

        LOG.audit(_("Create volume of %s GB"), size, context=context)

        image_href = None
        image_uuid = None
        # imageRef is only honored when the os-image-create extension is on.
        if self.ext_mgr.is_loaded('os-image-create'):
            image_href = volume.get('imageRef')
            if image_href:
                image_uuid = self._image_uuid_from_href(image_href)
                kwargs['image_id'] = image_uuid

        kwargs['availability_zone'] = volume.get('availability_zone', None)

        new_volume = self.volume_api.create(context,
                                            size,
                                            volume.get('display_name'),
                                            volume.get('display_description'),
                                            **kwargs)

        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        retval = self._view_builder.summary(req, dict(new_volume.iteritems()))

        return retval

    def _get_volume_filter_options(self):
        """Return volume search options allowed by non-admin."""
        return ('name', 'status')

    @wsgi.serializers(xml=VolumeTemplate)
    def update(self, req, id, body):
        """Update a volume."""
        context = req.environ['cinder.context']

        if not body:
            raise exc.HTTPBadRequest()

        if 'volume' not in body:
            raise exc.HTTPBadRequest()

        volume = body['volume']
        update_dict = {}

        valid_update_keys = (
            'name',
            'description',
            'metadata',
        )

        for key in valid_update_keys:
            if key in volume:
                update_dict[key] = volume[key]

        # NOTE(thingee): v2 API allows name instead of display_name
        if 'name' in update_dict:
            update_dict['display_name'] = update_dict['name']
            del update_dict['name']

        # NOTE(thingee): v2 API allows description instead of
        # display_description
        if 'description' in update_dict:
            update_dict['display_description'] = update_dict['description']
            del update_dict['description']

        try:
            volume = self.volume_api.get(context, id)
            self.volume_api.update(context, volume, update_dict)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        # Reflect the accepted updates in the response body.
        volume.update(update_dict)

        return self._view_builder.detail(req, volume)


def create_resource(ext_mgr):
    return wsgi.Resource(VolumeController(ext_mgr))


def remove_invalid_options(context, filters, allowed_search_options):
    """Remove search options that are not valid for non-admin API/context."""
    if context.is_admin:
        # Allow all options
        return
    # Otherwise, strip out all unknown options
    unknown_options = [opt for opt in filters
                       if opt not in allowed_search_options]
    bad_options = ", ".join(unknown_options)
    log_msg = _("Removing options '%s' from query") % bad_options
    LOG.debug(log_msg)
    for opt in unknown_options:
        del filters[opt]
/dev/null +++ b/cinder/api/versions.py @@ -0,0 +1,282 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +from lxml import etree + +from cinder.api.openstack import wsgi +from cinder.api.views import versions as views_versions +from cinder.api import xmlutil +from cinder import flags + +FLAGS = flags.FLAGS + + +_KNOWN_VERSIONS = { + "v2.0": { + "id": "v2.0", + "status": "CURRENT", + "updated": "2012-11-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + #(anthony) FIXME + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1", + } + ], + }, + "v1.0": { + "id": "v1.0", + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + #(anthony) FIXME + "href": 
"http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1", + } + ], + } + +} + + +def get_supported_versions(): + versions = {} + + if FLAGS.enable_v1_api: + versions['v1.0'] = _KNOWN_VERSIONS['v1.0'] + if FLAGS.enable_v2_api: + versions['v2.0'] = _KNOWN_VERSIONS['v2.0'] + + return versions + + +class MediaTypesTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return 'media-types' in datum + + +def make_version(elem): + elem.set('id') + elem.set('status') + elem.set('updated') + + mts = MediaTypesTemplateElement('media-types') + elem.append(mts) + + mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') + mt.set('base') + mt.set('type') + + xmlutil.make_links(elem, 'links') + + +version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class VersionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('version', selector='version') + make_version(root) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class VersionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('versions') + elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class ChoicesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('choices') + elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class AtomSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_ATOM} + + def __init__(self, metadata=None, xmlns=None): + self.metadata = metadata or {} 
+ if not xmlns: + self.xmlns = wsgi.XMLNS_ATOM + else: + self.xmlns = xmlns + + def _get_most_recent_update(self, versions): + recent = None + for version in versions: + updated = datetime.datetime.strptime(version['updated'], + '%Y-%m-%dT%H:%M:%SZ') + if not recent: + recent = updated + elif updated > recent: + recent = updated + + return recent.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_base_url(self, link_href): + # Make sure no trailing / + link_href = link_href.rstrip('/') + return link_href.rsplit('/', 1)[0] + '/' + + def _create_feed(self, versions, feed_title, feed_id): + feed = etree.Element('feed', nsmap=self.NSMAP) + title = etree.SubElement(feed, 'title') + title.set('type', 'text') + title.text = feed_title + + # Set this updated to the most recently updated version + recent = self._get_most_recent_update(versions) + etree.SubElement(feed, 'updated').text = recent + + etree.SubElement(feed, 'id').text = feed_id + + link = etree.SubElement(feed, 'link') + link.set('rel', 'self') + link.set('href', feed_id) + + author = etree.SubElement(feed, 'author') + etree.SubElement(author, 'name').text = 'Rackspace' + etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' + + for version in versions: + feed.append(self._create_version_entry(version)) + + return feed + + def _create_version_entry(self, version): + entry = etree.Element('entry') + etree.SubElement(entry, 'id').text = version['links'][0]['href'] + title = etree.SubElement(entry, 'title') + title.set('type', 'text') + title.text = 'Version %s' % version['id'] + etree.SubElement(entry, 'updated').text = version['updated'] + + for link in version['links']: + link_elem = etree.SubElement(entry, 'link') + link_elem.set('rel', link['rel']) + link_elem.set('href', link['href']) + if 'type' in link: + link_elem.set('type', link['type']) + + content = etree.SubElement(entry, 'content') + content.set('type', 'text') + content.text = 'Version %s %s (%s)' % (version['id'], + version['status'], + 
class Versions(wsgi.Resource):
    """WSGI resource describing the API versions this endpoint supports."""

    def __init__(self):
        # No controller object: this resource dispatches to its own
        # index/multi methods via get_action_args().
        super(Versions, self).__init__(None)

    @wsgi.serializers(xml=VersionsTemplate,
                      atom=VersionsAtomSerializer)
    def index(self, req):
        """Return all versions."""
        builder = views_versions.get_view_builder(req)
        return builder.build_versions(get_supported_versions())

    @wsgi.serializers(xml=ChoicesTemplate)
    @wsgi.response(300)  # HTTP 300 Multiple Choices
    def multi(self, req):
        """Return multiple choices."""
        builder = views_versions.get_view_builder(req)
        return builder.build_choices(get_supported_versions(), req)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        # The root path lists all versions; any other path yields the
        # "multiple choices" response instead.
        args = {}
        if request_environment['PATH_INFO'] == '/':
            args['action'] = 'index'
        else:
            args['action'] = 'multi'

        return args
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/views/backups.py b/cinder/api/views/backups.py new file mode 100644 index 0000000000..446bf30c61 --- /dev/null +++ b/cinder/api/views/backups.py @@ -0,0 +1,90 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class ViewBuilder(common.ViewBuilder):
    """Model backup API responses as a python dictionary."""

    _collection_name = "backups"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, backups):
        """Show a list of backups without many details."""
        return self._list_view(self.summary, request, backups)

    def detail_list(self, request, backups):
        """Detailed view of a list of backups."""
        return self._list_view(self.detail, request, backups)

    def summary(self, request, backup):
        """Generic, non-detailed view of a backup."""
        view = {
            'id': backup['id'],
            'name': backup['display_name'],
            'links': self._get_links(request, backup['id']),
        }
        return {'backup': view}

    def restore_summary(self, request, restore):
        """Generic, non-detailed view of a restore."""
        return {
            'restore': {
                'backup_id': restore['backup_id'],
                'volume_id': restore['volume_id'],
            },
        }

    def detail(self, request, backup):
        """Detailed view of a single backup."""
        # Response key -> backup record attribute.
        fields = {
            'id': 'id',
            'status': 'status',
            'size': 'size',
            'object_count': 'object_count',
            'availability_zone': 'availability_zone',
            'container': 'container',
            'created_at': 'created_at',
            'name': 'display_name',
            'description': 'display_description',
            'fail_reason': 'fail_reason',
            'volume_id': 'volume_id',
        }
        view = dict((key, backup.get(attr)) for key, attr in fields.items())
        view['links'] = self._get_links(request, backup['id'])
        return {'backup': view}

    def _list_view(self, func, request, backups):
        """Provide a view for a list of backups."""
        listing = [func(request, backup)['backup'] for backup in backups]
        result = {'backups': listing}

        links = self._get_collection_links(request,
                                           backups,
                                           self._collection_name)
        if links:
            result['backups_links'] = links

        return result
class ViewBuilder(object):
    """OpenStack API base limits view builder."""

    def build(self, rate_limits, absolute_limits):
        """Assemble the complete limits response body.

        :param rate_limits: list of raw rate-limit dicts.
        :param absolute_limits: dict mapping quota name to value.
        :returns: {"limits": {"rate": [...], "absolute": {...}}}
        """
        rate_limits = self._build_rate_limits(rate_limits)
        absolute_limits = self._build_absolute_limits(absolute_limits)

        output = {
            "limits": {
                "rate": rate_limits,
                "absolute": absolute_limits,
            },
        }

        return output

    def _build_absolute_limits(self, absolute_limits):
        """Builder for absolute limits

        absolute_limits should be given as a dict of limits.
        For example: {"ram": 512, "gigabytes": 1024}.

        Unknown names and None values are dropped; a name that maps to
        several display names (e.g. metadata_items) is fanned out.
        """
        limit_names = {
            "ram": ["maxTotalRAMSize"],
            "instances": ["maxTotalInstances"],
            "cores": ["maxTotalCores"],
            "gigabytes": ["maxTotalVolumeGigabytes"],
            "volumes": ["maxTotalVolumes"],
            "key_pairs": ["maxTotalKeypairs"],
            "floating_ips": ["maxTotalFloatingIps"],
            "metadata_items": ["maxServerMeta", "maxImageMeta"],
            "injected_files": ["maxPersonality"],
            "injected_file_content_bytes": ["maxPersonalitySize"],
        }
        limits = {}
        # .items() (valid on both Python 2 and 3) replaces the
        # Python-2-only .iteritems(); the inner loop variable no longer
        # shadows the outer 'name'.
        for name, value in absolute_limits.items():
            if name in limit_names and value is not None:
                for display_name in limit_names[name]:
                    limits[display_name] = value
        return limits

    def _build_rate_limits(self, rate_limits):
        """Group raw rate limits by their (URI, regex) pair."""
        limits = []
        for rate_limit in rate_limits:
            group = None
            entry = self._build_rate_limit(rate_limit)

            # check for an existing (uri, regex) group
            for limit in limits:
                if (limit["uri"] == rate_limit["URI"] and
                        limit["regex"] == rate_limit["regex"]):
                    group = limit
                    break

            # ensure we have a group if we didn't find one
            if not group:
                group = {
                    "uri": rate_limit["URI"],
                    "regex": rate_limit["regex"],
                    "limit": [],
                }
                limits.append(group)

            group["limit"].append(entry)

        return limits

    def _build_rate_limit(self, rate_limit):
        """Format a single rate limit entry.

        'resetTime' is a POSIX timestamp; 'next-available' is rendered
        as an ISO-8601 string via timeutils (module-level import).
        """
        _get_utc = datetime.datetime.utcfromtimestamp
        next_avail = _get_utc(rate_limit["resetTime"])
        return {
            "verb": rate_limit["verb"],
            "value": rate_limit["value"],
            "remaining": int(rate_limit["remaining"]),
            "unit": rate_limit["unit"],
            "next-available": timeutils.isotime(at=next_avail),
        }
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = 'share-snapshots'

    def summary_list(self, request, snapshots):
        """Show a list of share snapshots without many details."""
        return self._list_view(self.summary, request, snapshots)

    def detail_list(self, request, snapshots):
        """Detailed view of a list of share snapshots."""
        return self._list_view(self.detail, request, snapshots)

    def summary(self, request, snapshot):
        """Generic, non-detailed view of a share snapshot."""
        view = {
            'id': snapshot.get('id'),
            'name': snapshot.get('display_name'),
            'links': self._get_links(request, snapshot['id']),
        }
        return {'share-snapshot': view}

    def detail(self, request, snapshot):
        """Detailed view of a single share snapshot."""
        view = {
            'id': snapshot.get('id'),
            'share_id': snapshot.get('share_id'),
            'share_size': snapshot.get('share_size'),
            'created_at': snapshot.get('created_at'),
            'status': snapshot.get('status'),
            'name': snapshot.get('display_name'),
            'description': snapshot.get('display_description'),
            'share_proto': snapshot.get('share_proto'),
            'export_location': snapshot.get('export_location'),
            'links': self._get_links(request, snapshot['id']),
        }
        return {'share-snapshot': view}

    def _list_view(self, func, request, snapshots):
        """Provide a view for a list of share snapshots."""
        items = [func(request, snap)['share-snapshot'] for snap in snapshots]
        result = {self._collection_name: items}

        links = self._get_collection_links(request,
                                           snapshots,
                                           self._collection_name)
        if links:
            result['share_snapshots_links'] = links

        return result
"""Provide a view for a list of share snapshots.""" + snapshots_list = [func(request, snapshot)['share-snapshot'] + for snapshot in snapshots] + snapshots_links = self._get_collection_links(request, + snapshots, + self._collection_name) + snapshots_dict = {self._collection_name: snapshots_list} + + if snapshots_links: + snapshots_dict['share_snapshots_links'] = snapshots_links + + return snapshots_dict diff --git a/cinder/api/views/shares.py b/cinder/api/views/shares.py new file mode 100644 index 0000000000..32fb58a5ac --- /dev/null +++ b/cinder/api/views/shares.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = 'shares'

    def summary_list(self, request, shares):
        """Show a list of shares without many details."""
        return self._list_view(self.summary, request, shares)

    def detail_list(self, request, shares):
        """Detailed view of a list of shares."""
        return self._list_view(self.detail, request, shares)

    def summary(self, request, share):
        """Generic, non-detailed view of a share."""
        view = {
            'id': share.get('id'),
            'name': share.get('display_name'),
            'links': self._get_links(request, share['id']),
        }
        return {'share': view}

    def detail(self, request, share):
        """Detailed view of a single share."""
        view = {
            'id': share.get('id'),
            'size': share.get('size'),
            'availability_zone': share.get('availability_zone'),
            'created_at': share.get('created_at'),
            'status': share.get('status'),
            'name': share.get('display_name'),
            'description': share.get('display_description'),
            'snapshot_id': share.get('snapshot_id'),
            'share_proto': share.get('share_proto'),
            'export_location': share.get('export_location'),
            'links': self._get_links(request, share['id']),
        }
        return {'share': view}

    def _list_view(self, func, request, shares):
        """Provide a view for a list of shares."""
        items = [func(request, share)['share'] for share in shares]
        result = {'shares': items}

        links = self._get_collection_links(request,
                                           shares,
                                           self._collection_name)
        if links:
            result['shares_links'] = links

        return result
class ViewBuilder(common.ViewBuilder):
    """Model volume type API responses as python dictionaries."""

    def show(self, request, volume_type, brief=False):
        """Trim away extraneous volume type attributes."""
        trimmed = {
            'id': volume_type.get('id'),
            'name': volume_type.get('name'),
            'extra_specs': volume_type.get('extra_specs'),
        }
        if brief:
            return trimmed
        return {'volume_type': trimmed}

    def index(self, request, volume_types):
        """Index over trimmed volume types."""
        brief_views = [self.show(request, vtype, True)
                       for vtype in volume_types]
        return {'volume_types': brief_views}
class ViewBuilder(object):
    """Builds version discovery documents relative to a base URL."""

    def __init__(self, base_url):
        """
        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build_choices(self, VERSIONS, req):
        """Build the 300 Multiple Choices payload for every known version."""
        choices = []
        for key in VERSIONS:
            version = VERSIONS[key]
            choices.append({
                "id": version['id'],
                "status": version['status'],
                "links": [{"rel": "self",
                           "href": self.generate_href(req.path)}],
                "media-types": version['media-types'],
            })
        return {"choices": choices}

    def build_versions(self, versions):
        """Build the version index, ordered by version key."""
        version_objs = []
        for key in sorted(versions):
            version = versions[key]
            version_objs.append({
                "id": version['id'],
                "status": version['status'],
                "updated": version['updated'],
                "links": self._build_links(version),
            })
        return {"versions": version_objs}

    def build_version(self, version):
        """Return a copy of ``version`` with a self link prepended."""
        # Deep-copy so the caller's dict (and its links list) is untouched.
        described = copy.deepcopy(version)
        described['links'].insert(0, {
            "rel": "self",
            "href": self.base_url.rstrip('/') + '/',
        })
        return {"version": described}

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        return [{"rel": "self", "href": self.generate_href()}]

    def generate_href(self, path=None):
        """Create an url that refers to a specific version_number."""
        version_number = 'v1'
        if not path:
            return os.path.join(self.base_url, version_number) + '/'
        return os.path.join(self.base_url, version_number, path.strip('/'))
class Selector(object):
    """Selects datum to operate on from an object."""

    def __init__(self, *chain):
        """Initialize the selector.

        Each argument is a subsequent index into the object.
        """
        self.chain = chain

    def __repr__(self):
        """Return a representation of the selector."""
        return "Selector" + repr(self.chain)

    def __call__(self, obj, do_raise=False):
        """Select a datum to operate on.

        Walks the chain, applying each step to the current object: a
        callable step is invoked, anything else is used as an index.

        :param obj: The object from which to select the object.
        :param do_raise: If False (the default), return None if the
                         indexed datum does not exist.  Otherwise,
                         raise a KeyError.
        """
        current = obj
        for step in self.chain:
            if callable(step):
                current = step(current)
                continue
            try:
                current = current[step]
            except (KeyError, IndexError):
                # Missing datum: either surface it as a KeyError (for
                # consistency) or signal "no value" with None.
                if do_raise:
                    raise KeyError(step)
                return None
        return current
+ :param selector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + :param subselector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + This is used to further refine the datum + object returned by selector in the event + that it is a list of objects. + """ + + # Convert selector into a Selector + if selector is None: + selector = Selector() + elif not callable(selector): + selector = Selector(selector) + + # Convert subselector into a Selector + if subselector is not None and not callable(subselector): + subselector = Selector(subselector) + + self.tag = tag + self.selector = selector + self.subselector = subselector + self.attrib = {} + self._text = None + self._children = [] + self._childmap = {} + + # Run the incoming attributes through set() so that they + # become selectorized + if not attrib: + attrib = {} + attrib.update(extra) + for k, v in attrib.items(): + self.set(k, v) + + def __repr__(self): + """Return a representation of the template element.""" + + return ('<%s.%s %r at %#x>' % + (self.__class__.__module__, self.__class__.__name__, + self.tag, id(self))) + + def __len__(self): + """Return the number of child elements.""" + + return len(self._children) + + def __contains__(self, key): + """Determine whether a child node named by key exists.""" + + return key in self._childmap + + def __getitem__(self, idx): + """Retrieve a child node by index or name.""" + + if isinstance(idx, basestring): + # Allow access by node name + return self._childmap[idx] + else: + return self._children[idx] + + def append(self, elem): + """Append a child to the element.""" + + # Unwrap templates... 
+ elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.append(elem) + self._childmap[elem.tag] = elem + + def extend(self, elems): + """Append children to the element.""" + + # Pre-evaluate the elements + elemmap = {} + elemlist = [] + for elem in elems: + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap or elem.tag in elemmap: + raise KeyError(elem.tag) + + elemmap[elem.tag] = elem + elemlist.append(elem) + + # Update the children + self._children.extend(elemlist) + self._childmap.update(elemmap) + + def insert(self, idx, elem): + """Insert a child element at the given index.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.insert(idx, elem) + self._childmap[elem.tag] = elem + + def remove(self, elem): + """Remove a child element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Check if element exists + if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: + raise ValueError(_('element is not a child')) + + self._children.remove(elem) + del self._childmap[elem.tag] + + def get(self, key): + """Get an attribute. + + Returns a callable which performs datum selection. + + :param key: The name of the attribute to get. + """ + + return self.attrib[key] + + def set(self, key, value=None): + """Set an attribute. + + :param key: The name of the attribute to set. + + :param value: A callable taking an object and optional boolean + do_raise indicator and returning the datum bound + to the attribute. If None, a Selector() will be + constructed from the key. If a string, a + Selector() will be constructed from the string. 
+ """ + + # Convert value to a selector + if value is None: + value = Selector(key) + elif not callable(value): + value = Selector(value) + + self.attrib[key] = value + + def keys(self): + """Return the attribute names.""" + + return self.attrib.keys() + + def items(self): + """Return the attribute names and values.""" + + return self.attrib.items() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # We are a template element + return self + + def wrap(self): + """Wraps a template element to return a template.""" + + # Wrap in a basic Template + return Template(self) + + def apply(self, elem, obj): + """Apply text and attributes to an etree.Element. + + Applies the text and attribute instructions in the template + element to an etree.Element instance. + + :param elem: An etree.Element instance. + :param obj: The base object associated with this template + element. + """ + + # Start with the text... + if self.text is not None: + elem.text = unicode(self.text(obj)) + + # Now set up all the attributes... + for key, value in self.attrib.items(): + try: + elem.set(key, unicode(value(obj, True))) + except KeyError: + # Attribute has no value, so don't include it + pass + + def _render(self, parent, datum, patches, nsmap): + """Internal rendering. + + Renders the template node into an etree.Element object. + Returns the etree.Element object. + + :param parent: The parent etree.Element instance. + :param datum: The datum associated with this template element. + :param patches: A list of other template elements that must + also be applied. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance. 
+ """ + + # Allocate a node + if callable(self.tag): + tagname = self.tag(datum) + else: + tagname = self.tag + elem = etree.Element(tagname, nsmap=nsmap) + + # If we have a parent, append the node to the parent + if parent is not None: + parent.append(elem) + + # If the datum is None, do nothing else + if datum is None: + return elem + + # Apply this template element to the element + self.apply(elem, datum) + + # Additionally, apply the patches + for patch in patches: + patch.apply(elem, datum) + + # We have fully rendered the element; return it + return elem + + def render(self, parent, obj, patches=[], nsmap=None): + """Render an object. + + Renders an object against this template node. Returns a list + of two-item tuples, where the first item is an etree.Element + instance and the second item is the datum associated with that + instance. + + :param parent: The parent for the etree.Element instances. + :param obj: The object to render this template element + against. + :param patches: A list of other template elements to apply + when rendering this template element. + :param nsmap: An optional namespace dictionary to attach to + the etree.Element instances. 
+ """ + + # First, get the datum we're rendering + data = None if obj is None else self.selector(obj) + + # Check if we should render at all + if not self.will_render(data): + return [] + elif data is None: + return [(self._render(parent, None, patches, nsmap), None)] + + # Make the data into a list if it isn't already + if not isinstance(data, list): + data = [data] + elif parent is None: + raise ValueError(_('root element selecting a list')) + + # Render all the elements + elems = [] + for datum in data: + if self.subselector is not None: + datum = self.subselector(datum) + elems.append((self._render(parent, datum, patches, nsmap), datum)) + + # Return all the elements rendered, as well as the + # corresponding datum for the next step down the tree + return elems + + def will_render(self, datum): + """Hook method. + + An overridable hook method to determine whether this template + element will be rendered at all. By default, returns False + (inhibiting rendering) if the datum is None. + + :param datum: The datum associated with this template element. + """ + + # Don't render if datum is None + return datum is not None + + def _text_get(self): + """Template element text. + + Either None or a callable taking an object and optional + boolean do_raise indicator and returning the datum bound to + the text of the template element. + """ + + return self._text + + def _text_set(self, value): + # Convert value to a selector + if value is not None and not callable(value): + value = Selector(value) + + self._text = value + + def _text_del(self): + self._text = None + + text = property(_text_get, _text_set, _text_del) + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template rooted at this + element as a string, suitable for inclusion in debug logs. + """ + + # Build the inner contents of the tag... + contents = [self.tag, '!selector=%r' % self.selector] + + # Add the text... 
+ if self.text is not None: + contents.append('!text=%r' % self.text) + + # Add all the other attributes + for key, value in self.attrib.items(): + contents.append('%s=%r' % (key, value)) + + # If there are no children, return it as a closed tag + if len(self) == 0: + return '<%s/>' % ' '.join([str(i) for i in contents]) + + # OK, recurse to our children + children = [c.tree() for c in self] + + # Return the result + return ('<%s>%s' % + (' '.join(contents), ''.join(children), self.tag)) + + +def SubTemplateElement(parent, tag, attrib=None, selector=None, + subselector=None, **extra): + """Create a template element as a child of another. + + Corresponds to the etree.SubElement interface. Parameters are as + for TemplateElement, with the addition of the parent. + """ + + # Convert attributes + attrib = attrib or {} + attrib.update(extra) + + # Get a TemplateElement + elem = TemplateElement(tag, attrib=attrib, selector=selector, + subselector=subselector) + + # Append the parent safely + if parent is not None: + parent.append(elem) + + return elem + + +class Template(object): + """Represent a template.""" + + def __init__(self, root, nsmap=None): + """Initialize a template. + + :param root: The root element of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + self.root = root.unwrap() if root is not None else None + self.nsmap = nsmap or {} + self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) + + def _serialize(self, parent, obj, siblings, nsmap=None): + """Internal serialization. + + Recursive routine to build a tree of etree.Element instances + from an object based on the template. Returns the first + etree.Element instance rendered, or None. + + :param parent: The parent etree.Element instance. Can be + None. + :param obj: The object to render. + :param siblings: The TemplateElement instances against which + to render the object. 
+ :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance + rendered. + """ + + # First step, render the element + elems = siblings[0].render(parent, obj, siblings[1:], nsmap) + + # Now, recurse to all child elements + seen = set() + for idx, sibling in enumerate(siblings): + for child in sibling: + # Have we handled this child already? + if child.tag in seen: + continue + seen.add(child.tag) + + # Determine the child's siblings + nieces = [child] + for sib in siblings[idx + 1:]: + if child.tag in sib: + nieces.append(sib[child.tag]) + + # Now we recurse for every data element + for elem, datum in elems: + self._serialize(elem, datum, nieces) + + # Return the first element; at the top level, this will be the + # root element + if elems: + return elems[0][0] + + def serialize(self, obj, *args, **kwargs): + """Serialize an object. + + Serializes an object against the template. Returns a string + with the serialized XML. Positional and keyword arguments are + passed to etree.tostring(). + + :param obj: The object to serialize. + """ + + elem = self.make_tree(obj) + if elem is None: + return '' + + for k, v in self.serialize_options.items(): + kwargs.setdefault(k, v) + + # Serialize it into XML + return etree.tostring(elem, *args, **kwargs) + + def make_tree(self, obj): + """Create a tree. + + Serializes an object against the template. Returns an Element + node with appropriate children. + + :param obj: The object to serialize. + """ + + # If the template is empty, return the empty string + if self.root is None: + return None + + # Get the siblings and nsmap of the root element + siblings = self._siblings() + nsmap = self._nsmap() + + # Form the element tree + return self._serialize(None, obj, siblings, nsmap) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. By default, this is the root element itself. 
+ """ + + return [self.root] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + """ + + return self.nsmap.copy() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # Return the root element + return self.root + + def wrap(self): + """Wraps a template element to return a template.""" + + # We are a template + return self + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. + + :param master: The master template to test. + """ + + return True + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template as a string, suitable + for inclusion in debug logs. + """ + + return "%r: %s" % (self, self.root.tree()) + + +class MasterTemplate(Template): + """Represent a master template. + + Master templates are versioned derivatives of templates that + additionally allow slave templates to be attached. Slave + templates allow modification of the serialized result without + directly changing the master. + """ + + def __init__(self, root, version, nsmap=None): + """Initialize a master template. + + :param root: The root element of the template. + :param version: The version number of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + super(MasterTemplate, self).__init__(root, nsmap) + self.version = version + self.slaves = [] + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object version %s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.version, id(self))) + + def _siblings(self): + """Hook method for computing root siblings. 
+ + An overridable hook method to return the siblings of the root + element. This is the root element plus the root elements of + all the slave templates. + """ + + return [self.root] + [slave.root for slave in self.slaves] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + The namespace dictionary is computed by taking the master + template's namespace dictionary and updating it from all the + slave templates. + """ + + nsmap = self.nsmap.copy() + for slave in self.slaves: + nsmap.update(slave._nsmap()) + return nsmap + + def attach(self, *slaves): + """Attach one or more slave templates. + + Attaches one or more slave templates to the master template. + Slave templates must have a root element with the same tag as + the master template. The slave template's apply() method will + be called to determine if the slave should be applied to this + master; if it returns False, that slave will be skipped. + (This allows filtering of slaves based on the version of the + master template.) + """ + + slave_list = [] + for slave in slaves: + slave = slave.wrap() + + # Make sure we have a tree match + if slave.root.tag != self.root.tag: + slavetag = slave.root.tag + mastertag = self.root.tag + msg = _("Template tree mismatch; adding slave %(slavetag)s " + "to master %(mastertag)s") % locals() + raise ValueError(msg) + + # Make sure slave applies to this template + if not slave.apply(self): + continue + + slave_list.append(slave) + + # Add the slaves + self.slaves.extend(slave_list) + + def copy(self): + """Return a copy of this master template.""" + + # Return a copy of the MasterTemplate + tmp = self.__class__(self.root, self.version, self.nsmap) + tmp.slaves = self.slaves[:] + return tmp + + +class SlaveTemplate(Template): + """Represent a slave template. + + Slave templates are versioned derivatives of templates. 
Each + slave has a minimum version and optional maximum version of the + master template to which they can be attached. + """ + + def __init__(self, root, min_vers, max_vers=None, nsmap=None): + """Initialize a slave template. + + :param root: The root element of the template. + :param min_vers: The minimum permissible version of the master + template for this slave template to apply. + :param max_vers: An optional upper bound for the master + template version. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + super(SlaveTemplate, self).__init__(root, nsmap) + self.min_vers = min_vers + self.max_vers = max_vers + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object versions %s-%s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.min_vers, self.max_vers, id(self))) + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. This + version requires the master template to have a version number + between min_vers and max_vers. + + :param master: The master template to test. + """ + + # Does the master meet our minimum version requirement? + if master.version < self.min_vers: + return False + + # How about our maximum version requirement? + if self.max_vers is not None and master.version > self.max_vers: + return False + + return True + + +class TemplateBuilder(object): + """Template builder. + + This class exists to allow templates to be lazily built without + having to build them each time they are needed. It must be + subclassed, and the subclass must implement the construct() + method, which must return a Template (or subclass) instance. The + constructor will always return the template returned by + construct(), or, if it has a copy() method, a copy of that + template. 
+ """ + + _tmpl = None + + def __new__(cls, copy=True): + """Construct and return a template. + + :param copy: If True (the default), a copy of the template + will be constructed and returned, if possible. + """ + + # Do we need to construct the template? + if cls._tmpl is None: + tmp = super(TemplateBuilder, cls).__new__(cls) + + # Construct the template + cls._tmpl = tmp.construct() + + # If the template has a copy attribute, return the result of + # calling it + if copy and hasattr(cls._tmpl, 'copy'): + return cls._tmpl.copy() + + # Return the template + return cls._tmpl + + def construct(self): + """Construct a template. + + Called to construct a template instance, which it must return. + Only called once. + """ + + raise NotImplementedError(_("subclasses must implement construct()!")) + + +def make_links(parent, selector=None): + """ + Attach an Atom element to the parent. + """ + + elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, + selector=selector) + elem.set('rel') + elem.set('type') + elem.set('href') + + # Just for completeness... + return elem + + +def make_flat_dict(name, selector=None, subselector=None, ns=None): + """ + Utility for simple XML templates that traditionally used + XMLDictSerializer with no metadata. Returns a template element + where the top-level element has the given tag name, and where + sub-elements have tag names derived from the object's keys and + text derived from the object's values. This only works for flat + dictionary objects, not dictionaries containing nested lists or + dictionaries. + """ + + # Set up the names we need... 
+ if ns is None: + elemname = name + tagname = Selector(0) + else: + elemname = '{%s}%s' % (ns, name) + tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0]) + + if selector is None: + selector = name + + # Build the root element + root = TemplateElement(elemname, selector=selector, + subselector=subselector) + + # Build an element to represent all the keys and values + elem = SubTemplateElement(root, tagname, selector=get_items) + elem.text = 1 + + # Return the template + return root diff --git a/cinder/backup/__init__.py b/cinder/backup/__init__.py new file mode 100644 index 0000000000..368e2ffff0 --- /dev/null +++ b/cinder/backup/__init__.py @@ -0,0 +1,23 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Importing full names to not pollute the namespace and cause possible +# collisions with use of 'from cinder.backup import ' elsewhere. + +import cinder.flags +import cinder.openstack.common.importutils + +API = cinder.openstack.common.importutils.import_class( + cinder.flags.FLAGS.backup_api_class) diff --git a/cinder/backup/api.py b/cinder/backup/api.py new file mode 100644 index 0000000000..1b5d1d49bd --- /dev/null +++ b/cinder/backup/api.py @@ -0,0 +1,171 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to the volume backups service. +""" + +from eventlet import greenthread + +from cinder.backup import rpcapi as backup_rpcapi +from cinder.db import base +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +import cinder.volume + + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +class API(base.Base): + """API for interacting with the volume backup manager.""" + + def __init__(self, db_driver=None): + self.backup_rpcapi = backup_rpcapi.BackupAPI() + self.volume_api = cinder.volume.API() + super(API, self).__init__(db_driver) + + def get(self, context, backup_id): + rv = self.db.backup_get(context, backup_id) + return dict(rv.iteritems()) + + def delete(self, context, backup_id): + """ + Make the RPC call to delete a volume backup. 
+ """ + backup = self.get(context, backup_id) + if backup['status'] not in ['available', 'error']: + msg = _('Backup status must be available or error') + raise exception.InvalidBackup(reason=msg) + + self.db.backup_update(context, backup_id, {'status': 'deleting'}) + self.backup_rpcapi.delete_backup(context, + backup['host'], + backup['id']) + + # TODO(moorehef): Add support for search_opts, discarded atm + def get_all(self, context, search_opts={}): + if context.is_admin: + backups = self.db.backup_get_all(context) + else: + backups = self.db.backup_get_all_by_project(context, + context.project_id) + + return backups + + def create(self, context, name, description, volume_id, + container, availability_zone=None): + """ + Make the RPC call to create a volume backup. + """ + volume = self.volume_api.get(context, volume_id) + if volume['status'] != "available": + msg = _('Volume to be backed up must be available') + raise exception.InvalidVolume(reason=msg) + self.db.volume_update(context, volume_id, {'status': 'backing-up'}) + + options = {'user_id': context.user_id, + 'project_id': context.project_id, + 'display_name': name, + 'display_description': description, + 'volume_id': volume_id, + 'status': 'creating', + 'container': container, + 'size': volume['size'], + # TODO(DuncanT): This will need de-managling once + # multi-backend lands + 'host': volume['host'], } + + backup = self.db.backup_create(context, options) + + #TODO(DuncanT): In future, when we have a generic local attach, + # this can go via the scheduler, which enables + # better load ballancing and isolation of services + self.backup_rpcapi.create_backup(context, + backup['host'], + backup['id'], + volume_id) + + return backup + + def restore(self, context, backup_id, volume_id=None): + """ + Make the RPC call to restore a volume backup. 
+ """ + backup = self.get(context, backup_id) + if backup['status'] != 'available': + msg = _('Backup status must be available') + raise exception.InvalidBackup(reason=msg) + + size = backup['size'] + if size is None: + msg = _('Backup to be restored has invalid size') + raise exception.InvalidBackup(reason=msg) + + # Create a volume if none specified. If a volume is specified check + # it is large enough for the backup + if volume_id is None: + name = 'restore_backup_%s' % backup_id + description = 'auto-created_from_restore_from_swift' + + LOG.audit(_("Creating volume of %(size)s GB for restore of " + "backup %(backup_id)s"), locals(), context=context) + volume = self.volume_api.create(context, size, name, description) + volume_id = volume['id'] + + while True: + volume = self.volume_api.get(context, volume_id) + if volume['status'] != 'creating': + break + greenthread.sleep(1) + else: + volume = self.volume_api.get(context, volume_id) + volume_size = volume['size'] + if volume_size < size: + err = _('volume size %(volume_size)d is too small to restore ' + 'backup of size %(size)d.') % locals() + raise exception.InvalidVolume(reason=err) + + if volume['status'] != "available": + msg = _('Volume to be restored to must be available') + raise exception.InvalidVolume(reason=msg) + + LOG.debug('Checking backup size %s against volume size %s', + size, volume['size']) + if size > volume['size']: + msg = _('Volume to be restored to is smaller ' + 'than the backup to be restored') + raise exception.InvalidVolume(reason=msg) + + LOG.audit(_("Overwriting volume %(volume_id)s with restore of " + "backup %(backup_id)s"), locals(), context=context) + + # Setting the status here rather than setting at start and unrolling + # for each error condition, it should be a very small window + self.db.backup_update(context, backup_id, {'status': 'restoring'}) + self.db.volume_update(context, volume_id, {'status': + 'restoring-backup'}) + self.backup_rpcapi.restore_backup(context, + 
backup['host'], + backup['id'], + volume_id) + + d = {'backup_id': backup_id, + 'volume_id': volume_id, } + + return d diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py new file mode 100755 index 0000000000..b4a972327b --- /dev/null +++ b/cinder/backup/manager.py @@ -0,0 +1,264 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Backup manager manages volume backups. + +Volume Backups are full copies of persistent volumes stored in Swift object +storage. They are usable without the original object being available. A +volume backup can be restored to the original volume it was created from or +any other available volume with a minimum size of the original volume. +Volume backups can be created, restored, deleted and listed. + +**Related Flags** + +:backup_topic: What :mod:`rpc` topic to listen to (default: + `cinder-backup`). +:backup_manager: The module name of a class derived from + :class:`manager.Manager` (default: + :class:`cinder.backup.manager.Manager`). 
+ +""" + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import manager +from cinder.openstack.common import excutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +backup_manager_opts = [ + cfg.StrOpt('backup_service', + default='cinder.backup.services.swift', + help='Service to use for backups.'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(backup_manager_opts) + + +class BackupManager(manager.SchedulerDependentManager): + """Manages backup of block storage devices.""" + + RPC_API_VERSION = '1.0' + + def __init__(self, service_name=None, *args, **kwargs): + self.service = importutils.import_module(FLAGS.backup_service) + self.az = FLAGS.storage_availability_zone + self.volume_manager = importutils.import_object(FLAGS.volume_manager) + self.driver = self.volume_manager.driver + super(BackupManager, self).__init__(service_name='backup', + *args, **kwargs) + self.driver.db = self.db + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service.""" + + ctxt = context.get_admin_context() + self.driver.do_setup(ctxt) + self.driver.check_for_setup_error() + + LOG.info(_("Cleaning up incomplete backup operations")) + volumes = self.db.volume_get_all_by_host(ctxt, self.host) + for volume in volumes: + if volume['status'] == 'backing-up': + LOG.info(_('Resetting volume %s to available ' + '(was backing-up)') % volume['id']) + self.volume_manager.detach_volume(ctxt, volume['id']) + if volume['status'] == 'restoring-backup': + LOG.info(_('Resetting volume %s to error_restoring ' + '(was restoring-backup)') % volume['id']) + self.volume_manager.detach_volume(ctxt, volume['id']) + self.db.volume_update(ctxt, volume['id'], + {'status': 'error_restoring'}) + + # TODO(smulcahy) implement full resume of backup and restore + # operations on restart (rather than 
simply resetting) + backups = self.db.backup_get_all_by_host(ctxt, self.host) + for backup in backups: + if backup['status'] == 'creating': + LOG.info(_('Resetting backup %s to error ' + '(was creating)') % backup['id']) + err = 'incomplete backup reset on manager restart' + self.db.backup_update(ctxt, backup['id'], {'status': 'error', + 'fail_reason': err}) + if backup['status'] == 'restoring': + LOG.info(_('Resetting backup %s to available ' + '(was restoring)') % backup['id']) + self.db.backup_update(ctxt, backup['id'], + {'status': 'available'}) + if backup['status'] == 'deleting': + LOG.info(_('Resuming delete on backup: %s') % backup['id']) + self.delete_backup(ctxt, backup['id']) + + def create_backup(self, context, backup_id): + """ + Create volume backups using configured backup service. + """ + backup = self.db.backup_get(context, backup_id) + volume_id = backup['volume_id'] + volume = self.db.volume_get(context, volume_id) + LOG.info(_('create_backup started, backup: %(backup_id)s for ' + 'volume: %(volume_id)s') % locals()) + self.db.backup_update(context, backup_id, {'host': self.host, + 'service': + FLAGS.backup_service}) + + expected_status = 'backing-up' + actual_status = volume['status'] + if actual_status != expected_status: + err = _('create_backup aborted, expected volume status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidVolume(reason=err) + + expected_status = 'creating' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('create_backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidBackup(reason=err) + + try: + backup_service = 
self.service.get_backup_service(context) + self.driver.backup_volume(context, backup, backup_service) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'available'}) + self.db.backup_update(context, backup_id, + {'status': 'error', + 'fail_reason': unicode(err)}) + + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'available', + 'size': volume['size'], + 'availability_zone': + self.az}) + LOG.info(_('create_backup finished. backup: %s'), backup_id) + + def restore_backup(self, context, backup_id, volume_id): + """ + Restore volume backups from configured backup service. + """ + LOG.info(_('restore_backup started, restoring backup: %(backup_id)s' + ' to volume: %(volume_id)s') % locals()) + backup = self.db.backup_get(context, backup_id) + volume = self.db.volume_get(context, volume_id) + self.db.backup_update(context, backup_id, {'host': self.host}) + + expected_status = 'restoring-backup' + actual_status = volume['status'] + if actual_status != expected_status: + err = _('restore_backup aborted, expected volume status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'available'}) + raise exception.InvalidVolume(reason=err) + + expected_status = 'restoring' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('restore_backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + self.db.volume_update(context, volume_id, {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + if volume['size'] > backup['size']: + LOG.warn('volume: %s, size: %d is larger than backup: %s, ' + 'size: %d, continuing with restore', + volume['id'], volume['size'], + backup['id'], backup['size']) + 
+ backup_service = backup['service'] + configured_service = FLAGS.backup_service + if backup_service != configured_service: + err = _('restore_backup aborted, the backup service currently' + ' configured [%(configured_service)s] is not the' + ' backup service that was used to create this' + ' backup [%(backup_service)s]') % locals() + self.db.backup_update(context, backup_id, {'status': 'available'}) + self.db.volume_update(context, volume_id, {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + try: + backup_service = self.service.get_backup_service(context) + self.driver.restore_backup(context, backup, volume, + backup_service) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'error_restoring'}) + self.db.backup_update(context, backup_id, + {'status': 'available'}) + + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'available'}) + LOG.info(_('restore_backup finished, backup: %(backup_id)s restored' + ' to volume: %(volume_id)s') % locals()) + + def delete_backup(self, context, backup_id): + """ + Delete volume backup from configured backup service. 
+ """ + backup = self.db.backup_get(context, backup_id) + LOG.info(_('delete_backup started, backup: %s'), backup_id) + self.db.backup_update(context, backup_id, {'host': self.host}) + + expected_status = 'deleting' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('delete_backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidBackup(reason=err) + + backup_service = backup['service'] + if backup_service is not None: + configured_service = FLAGS.backup_service + if backup_service != configured_service: + err = _('delete_backup aborted, the backup service currently' + ' configured [%(configured_service)s] is not the' + ' backup service that was used to create this' + ' backup [%(backup_service)s]') % locals() + self.db.backup_update(context, backup_id, + {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + try: + backup_service = self.service.get_backup_service(context) + backup_service.delete(backup) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.backup_update(context, backup_id, + {'status': 'error', + 'fail_reason': + unicode(err)}) + + context = context.elevated() + self.db.backup_destroy(context, backup_id) + LOG.info(_('delete_backup finished, backup %s deleted'), backup_id) diff --git a/cinder/backup/rpcapi.py b/cinder/backup/rpcapi.py new file mode 100644 index 0000000000..a0b8771bca --- /dev/null +++ b/cinder/backup/rpcapi.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the volume backup RPC API. +""" + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +import cinder.openstack.common.rpc.proxy + + +LOG = logging.getLogger(__name__) + +FLAGS = flags.FLAGS + + +class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + '''Client side of the volume rpc API. + + API version history: + + 1.0 - Initial version. + ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self): + super(BackupAPI, self).__init__( + topic=FLAGS.backup_topic, + default_version=self.BASE_RPC_API_VERSION) + + def create_backup(self, ctxt, host, backup_id, volume_id): + LOG.debug("create_backup in rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + LOG.debug("create queue topic=%s", topic) + self.cast(ctxt, + self.make_msg('create_backup', + backup_id=backup_id), + topic=topic) + + def restore_backup(self, ctxt, host, backup_id, volume_id): + LOG.debug("restore_backup in rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + LOG.debug("restore queue topic=%s", topic) + self.cast(ctxt, + self.make_msg('restore_backup', + backup_id=backup_id, + volume_id=volume_id), + topic=topic) + + def delete_backup(self, ctxt, host, backup_id): + LOG.debug("delete_backup rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + self.cast(ctxt, + self.make_msg('delete_backup', + backup_id=backup_id), + topic=topic) diff --git a/cinder/backup/services/__init__.py 
b/cinder/backup/services/__init__.py new file mode 100644 index 0000000000..f745a135ae --- /dev/null +++ b/cinder/backup/services/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/cinder/backup/services/swift.py b/cinder/backup/services/swift.py new file mode 100644 index 0000000000..43cbd1ff2d --- /dev/null +++ b/cinder/backup/services/swift.py @@ -0,0 +1,384 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of a backup service that uses Swift as the backend + +**Related Flags** + +:backup_swift_url: The URL of the Swift endpoint (default: + localhost:8080). +:backup_swift_object_size: The size in bytes of the Swift objects used + for volume backups (default: 52428800). 
:backup_swift_retry_attempts: The number of retries to make for Swift
                              operations (default: 10).
:backup_swift_retry_backoff: The backoff time in seconds between retrying
                             failed Swift operations (default: 10).
:backup_compression_algorithm: Compression algorithm to use for volume
                               backups. Supported options are:
                               None (to disable), zlib and bz2 (default: zlib)
"""

import hashlib
import httplib
import json
import os
import socket
import StringIO

import eventlet
from oslo.config import cfg

from cinder.db import base
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from swiftclient import client as swift

LOG = logging.getLogger(__name__)

swiftbackup_service_opts = [
    cfg.StrOpt('backup_swift_url',
               default='http://localhost:8080/v1/AUTH_',
               help='The URL of the Swift endpoint'),
    cfg.StrOpt('backup_swift_container',
               default='volumebackups',
               help='The default Swift container to use'),
    cfg.IntOpt('backup_swift_object_size',
               default=52428800,
               help='The size in bytes of Swift backup objects'),
    cfg.IntOpt('backup_swift_retry_attempts',
               default=3,
               help='The number of retries to make for Swift operations'),
    cfg.IntOpt('backup_swift_retry_backoff',
               default=2,
               help='The backoff time in seconds between Swift retries'),
    cfg.StrOpt('backup_compression_algorithm',
               default='zlib',
               help='Compression algorithm (None to disable)'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(swiftbackup_service_opts)


class SwiftBackupService(base.Base):
    """Provides backup, restore and delete of backup objects within Swift.

    A volume is chunked into fixed-size Swift objects, optionally
    compressed per chunk, and described by a JSON metadata object stored
    alongside the chunks.  The metadata carries a format version so that
    restore dispatches to the matching _restore_vX method.
    """

    # Version written into each backup's metadata object.
    SERVICE_VERSION = '1.0.0'
    # Maps a metadata version to the restore method that understands it.
    SERVICE_VERSION_MAPPING = {'1.0.0': '_restore_v1'}

    def _get_compressor(self, algorithm):
        """Map an algorithm name to a compression module.

        Returns None when compression is disabled; raises ValueError for
        unsupported (or unimportable) algorithm names.
        """
        try:
            if algorithm.lower() in ('none', 'off', 'no'):
                return None
            elif algorithm.lower() in ('zlib', 'gzip'):
                import zlib as compressor
                return compressor
            elif algorithm.lower() in ('bz2', 'bzip2'):
                import bz2 as compressor
                return compressor
        except ImportError:
            pass

        err = _('unsupported compression algorithm: %s') % algorithm
        raise ValueError(unicode(err))

    def __init__(self, context, db_driver=None):
        # Swift account URL is the configured endpoint plus the tenant's
        # project id; auth reuses the caller's token (preauth connection).
        self.context = context
        self.swift_url = '%s%s' % (FLAGS.backup_swift_url,
                                   self.context.project_id)
        self.az = FLAGS.storage_availability_zone
        self.data_block_size_bytes = FLAGS.backup_swift_object_size
        self.swift_attempts = FLAGS.backup_swift_retry_attempts
        self.swift_backoff = FLAGS.backup_swift_retry_backoff
        self.compressor = \
            self._get_compressor(FLAGS.backup_compression_algorithm)
        self.conn = swift.Connection(None, None, None,
                                     retries=self.swift_attempts,
                                     preauthurl=self.swift_url,
                                     preauthtoken=self.context.auth_token,
                                     starting_backoff=self.swift_backoff)
        super(SwiftBackupService, self).__init__(db_driver)

    def _check_container_exists(self, container):
        """Return True if *container* exists in Swift, False if not.

        Re-raises any ClientException other than 404.
        """
        LOG.debug(_('_check_container_exists: container: %s') % container)
        try:
            self.conn.head_container(container)
        except swift.ClientException as error:
            if error.http_status == httplib.NOT_FOUND:
                LOG.debug(_('container %s does not exist') % container)
                return False
            else:
                raise
        else:
            LOG.debug(_('container %s exists') % container)
            return True

    def _create_container(self, context, backup):
        """Ensure the backup's container exists, creating it if needed.

        Falls back to the configured default container when the backup
        record has none, persisting the choice to the backup row.
        """
        backup_id = backup['id']
        container = backup['container']
        LOG.debug(_('_create_container started, container: %(container)s,'
                    'backup: %(backup_id)s') % locals())
        if container is None:
            container = FLAGS.backup_swift_container
            self.db.backup_update(context, backup_id, {'container': container})
        if not self._check_container_exists(container):
            self.conn.put_container(container)
        return container

    def _generate_swift_object_name_prefix(self, backup):
        """Build the object-name prefix: volume_<id>/<ts>/az_<az>_backup_<id>."""
        az = 'az_%s' % self.az
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
        prefix = volume + '/' + timestamp + '/' + backup_name
        LOG.debug(_('_generate_swift_object_name_prefix: %s') % prefix)
        return prefix

    def _generate_object_names(self, backup):
        """List names of the Swift objects under this backup's prefix."""
        prefix = backup['service_metadata']
        swift_objects = self.conn.get_container(backup['container'],
                                                prefix=prefix,
                                                full_listing=True)[1]
        swift_object_names = []
        for swift_object in swift_objects:
            swift_object_names.append(swift_object['name'])
        LOG.debug(_('generated object list: %s') % swift_object_names)
        return swift_object_names

    def _metadata_filename(self, backup):
        """Name of the JSON metadata object for *backup* (prefix + suffix)."""
        swift_object_name = backup['service_metadata']
        filename = '%s_metadata' % swift_object_name
        return filename

    def _write_metadata(self, backup, volume_id, container, object_list):
        """Serialize and upload the backup's metadata object.

        Verifies the upload by comparing Swift's returned etag against a
        locally computed MD5; raises InvalidBackup on mismatch.
        """
        filename = self._metadata_filename(backup)
        LOG.debug(_('_write_metadata started, container name: %(container)s,'
                    ' metadata filename: %(filename)s') % locals())
        metadata = {}
        metadata['version'] = self.SERVICE_VERSION
        metadata['backup_id'] = backup['id']
        metadata['volume_id'] = volume_id
        metadata['backup_name'] = backup['display_name']
        metadata['backup_description'] = backup['display_description']
        metadata['created_at'] = str(backup['created_at'])
        metadata['objects'] = object_list
        metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
        reader = StringIO.StringIO(metadata_json)
        etag = self.conn.put_object(container, filename, reader)
        md5 = hashlib.md5(metadata_json).hexdigest()
        if etag != md5:
            err = _('error writing metadata file to swift, MD5 of metadata'
                    ' file in swift [%(etag)s] is not the same as MD5 of '
                    'metadata file sent to swift [%(md5)s]') % locals()
            raise exception.InvalidBackup(reason=err)
        LOG.debug(_('_write_metadata finished'))

    def _read_metadata(self, backup):
        """Download and parse the backup's JSON metadata object."""
        container = backup['container']
        filename = self._metadata_filename(backup)
        LOG.debug(_('_read_metadata started, container name: %(container)s, '
                    'metadata filename: %(filename)s') % locals())
        (resp, body) = self.conn.get_object(container, filename)
        metadata = json.loads(body)
        LOG.debug(_('_read_metadata finished (%s)') % metadata)
        return metadata

    def backup(self, backup, volume_file):
        """Backup the given volume to swift using the given backup metadata.

        Reads *volume_file* in backup_swift_object_size chunks, optionally
        compresses each chunk, uploads it with MD5 verification, then writes
        the metadata object describing all chunks.
        """
        backup_id = backup['id']
        volume_id = backup['volume_id']
        volume = self.db.volume_get(self.context, volume_id)

        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)

        try:
            container = self._create_container(self.context, backup)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=str(err))

        object_prefix = self._generate_swift_object_name_prefix(backup)
        backup['service_metadata'] = object_prefix
        self.db.backup_update(self.context, backup_id, {'service_metadata':
                                                        object_prefix})
        volume_size_bytes = volume['size'] * 1024 * 1024 * 1024
        availability_zone = self.az
        LOG.debug(_('starting backup of volume: %(volume_id)s to swift,'
                    ' volume size: %(volume_size_bytes)d, swift object names'
                    ' prefix %(object_prefix)s, availability zone:'
                    ' %(availability_zone)s') % locals())
        object_id = 1
        object_list = []
        while True:
            data_block_size_bytes = self.data_block_size_bytes
            object_name = '%s-%05d' % (object_prefix, object_id)
            obj = {}
            obj[object_name] = {}
            obj[object_name]['offset'] = volume_file.tell()
            data = volume_file.read(data_block_size_bytes)
            obj[object_name]['length'] = len(data)
            if data == '':
                break
            LOG.debug(_('reading chunk of data from volume'))
            if self.compressor is not None:
                algorithm = FLAGS.backup_compression_algorithm.lower()
                obj[object_name]['compression'] = algorithm
                data_size_bytes = len(data)
                data = self.compressor.compress(data)
                comp_size_bytes = len(data)
                LOG.debug(_('compressed %(data_size_bytes)d bytes of data'
                            ' to %(comp_size_bytes)d bytes using '
                            '%(algorithm)s') % locals())
            else:
                LOG.debug(_('not compressing data'))
                obj[object_name]['compression'] = 'none'

            reader = StringIO.StringIO(data)
            LOG.debug(_('About to put_object'))
            try:
                etag = self.conn.put_object(container, object_name, reader)
            except socket.error as err:
                raise exception.SwiftConnectionFailed(reason=str(err))
            LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') % locals())
            # Compare MD5 of the (possibly compressed) bytes we sent with
            # the etag Swift computed server-side to detect corruption.
            md5 = hashlib.md5(data).hexdigest()
            obj[object_name]['md5'] = md5
            LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') % locals())
            if etag != md5:
                err = _('error writing object to swift, MD5 of object in '
                        'swift %(etag)s is not the same as MD5 of object sent '
                        'to swift %(md5)s') % locals()
                raise exception.InvalidBackup(reason=err)
            object_list.append(obj)
            object_id += 1
            # Yield to other greenthreads between chunks so the service
            # stays responsive during long backups.
            LOG.debug(_('Calling eventlet.sleep(0)'))
            eventlet.sleep(0)
        try:
            self._write_metadata(backup, volume_id, container, object_list)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=str(err))
        # NOTE(review): object_id here is len(object_list) + 1, so the
        # recorded object_count equals data objects plus the metadata
        # object written above — confirm this off-by-one is intended.
        self.db.backup_update(self.context, backup_id, {'object_count':
                                                        object_id})
        LOG.debug(_('backup %s finished.') % backup_id)

    def _restore_v1(self, backup, volume_id, metadata, volume_file):
        """Restore a v1 swift volume backup from swift.

        Validates that the container's object listing matches the metadata
        object list, then downloads, decompresses and writes each chunk to
        *volume_file* in metadata order.
        """
        backup_id = backup['id']
        LOG.debug(_('v1 swift volume backup restore of %s started'), backup_id)
        container = backup['container']
        metadata_objects = metadata['objects']
        metadata_object_names = []
        for metadata_object in metadata_objects:
            metadata_object_names.extend(metadata_object.keys())
        LOG.debug(_('metadata_object_names = %s') % metadata_object_names)
        # The metadata object itself is under the same prefix; exclude it
        # before comparing listings.
        prune_list = [self._metadata_filename(backup)]
        swift_object_names = [swift_object_name for swift_object_name in
                              self._generate_object_names(backup)
                              if swift_object_name not in prune_list]
        if sorted(swift_object_names) != sorted(metadata_object_names):
            err = _('restore_backup aborted, actual swift object list in '
                    'swift does not match object list stored in metadata')
            raise exception.InvalidBackup(reason=err)

        for metadata_object in metadata_objects:
            object_name = metadata_object.keys()[0]
            LOG.debug(_('restoring object from swift. backup: %(backup_id)s, '
                        'container: %(container)s, swift object name: '
                        '%(object_name)s, volume: %(volume_id)s') % locals())
            try:
                (resp, body) = self.conn.get_object(container, object_name)
            except socket.error as err:
                raise exception.SwiftConnectionFailed(reason=str(err))
            compression_algorithm = metadata_object[object_name]['compression']
            decompressor = self._get_compressor(compression_algorithm)
            if decompressor is not None:
                LOG.debug(_('decompressing data using %s algorithm') %
                          compression_algorithm)
                decompressed = decompressor.decompress(body)
                volume_file.write(decompressed)
            else:
                volume_file.write(body)

            # force flush every write to avoid long blocking write on close
            volume_file.flush()
            os.fsync(volume_file.fileno())
            # Restoring a backup to a volume can take some time. Yield so
            # other threads can run, allowing for among other things the
            # service status to be updated
            eventlet.sleep(0)
        LOG.debug(_('v1 swift volume backup restore of %s finished'),
                  backup_id)

    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from swift.

        Reads the metadata object to determine the backup format version
        and dispatches to the matching _restore_vX method; raises
        InvalidBackup for unknown versions.
        """
        backup_id = backup['id']
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug(_('starting restore of backup %(object_prefix)s from swift'
                    ' container: %(container)s, to volume %(volume_id)s, '
                    'backup: %(backup_id)s') % locals())
        try:
            metadata = self._read_metadata(backup)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=str(err))
        metadata_version = metadata['version']
        LOG.debug(_('Restoring swift backup version %s'), metadata_version)
        try:
            # An unknown version maps to None, making getattr raise
            # TypeError — translated into InvalidBackup below.
            restore_func = getattr(self, self.SERVICE_VERSION_MAPPING.get(
                metadata_version))
        except TypeError:
            err = (_('No support to restore swift backup version %s')
                   % metadata_version)
            raise exception.InvalidBackup(reason=err)
        restore_func(backup, volume_id, metadata, volume_file)
        LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.') %
                  locals())

    def delete(self, backup):
        """Delete the given backup from swift.

        Best-effort: listing or per-object delete failures are logged and
        skipped; only connection-level socket errors abort the delete.
        """
        container = backup['container']
        LOG.debug('delete started, backup: %s, container: %s, prefix: %s',
                  backup['id'], container, backup['service_metadata'])

        if container is not None:
            swift_object_names = []
            try:
                swift_object_names = self._generate_object_names(backup)
            except Exception:
                LOG.warn(_('swift error while listing objects, continuing'
                           ' with delete'))

            for swift_object_name in swift_object_names:
                try:
                    self.conn.delete_object(container, swift_object_name)
                except socket.error as err:
                    raise exception.SwiftConnectionFailed(reason=str(err))
                except Exception:
                    LOG.warn(_('swift error while deleting object %s, '
                               'continuing with delete') % swift_object_name)
                else:
                    LOG.debug(_('deleted swift object: %(swift_object_name)s'
                                ' in container: %(container)s') % locals())
                # Deleting a backup's objects from swift can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)

        LOG.debug(_('delete %s finished') % backup['id'])


def get_backup_service(context):
    """Factory used by the backup manager to obtain a service instance."""
    return SwiftBackupService(context)
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Helper code for the iSCSI volume driver.

"""
import os
import re

from oslo.config import cfg

from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import utils as volume_utils

LOG = logging.getLogger(__name__)

iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
                               default='tgtadm',
                               help='iscsi target user-land tool to use'),
                    cfg.StrOpt('volumes_dir',
                               default='$state_path/volumes',
                               help='Volume configuration file storage '
                                    'directory'),
                    cfg.StrOpt('iet_conf',
                               default='/etc/iet/ietd.conf',
                               help='IET configuration file'),
                    cfg.StrOpt('lio_initiator_iqns',
                               default='',
                               # NOTE: fixed typo "Comma-separatd"
                               help=('Comma-separated list of initiator IQNs '
                                     'allowed to connect to the '
                                     'iSCSI target. (From Nova compute nodes.)'
                                     )
                               ),
                    cfg.StrOpt('iscsi_iotype',
                               default='fileio',
                               help=('Sets the behavior of the iSCSI target '
                                     'to either perform blockio or fileio '
                                     'optionally, auto can be set and Cinder '
                                     'will autodetect type of backing device')
                               )
                    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(iscsi_helper_opt)
FLAGS.import_opt('volume_name_template', 'cinder.db')


class TargetAdmin(object):
    """iSCSI target administration.

    Base class for iSCSI target admin helpers.  Subclasses wrap a specific
    user-land tool (tgtadm, ietadm, rtstool) behind a common interface.
    """

    def __init__(self, cmd, execute):
        # cmd is the admin binary invoked by _run().
        self._cmd = cmd
        self.set_execute(execute)

    def set_execute(self, execute):
        """Set the function to be used to execute commands."""
        self._execute = execute

    def _run(self, *args, **kwargs):
        """Run the admin command with *args*, always as root."""
        self._execute(self._cmd, *args, run_as_root=True, **kwargs)

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a iSCSI target and logical unit"""
        raise NotImplementedError()

    def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
        """Remove a iSCSI target and logical unit"""
        raise NotImplementedError()

    def _new_target(self, name, tid, **kwargs):
        """Create a new iSCSI target."""
        raise NotImplementedError()

    def _delete_target(self, tid, **kwargs):
        """Delete a target."""
        raise NotImplementedError()

    def show_target(self, tid, iqn=None, **kwargs):
        """Query the given target ID."""
        raise NotImplementedError()

    def _new_logicalunit(self, tid, lun, path, **kwargs):
        """Create a new LUN on a target using the supplied path."""
        raise NotImplementedError()

    def _delete_logicalunit(self, tid, lun, **kwargs):
        """Delete a logical unit from a target."""
        raise NotImplementedError()


class TgtAdm(TargetAdmin):
    """iSCSI target administration using tgtadm."""

    def __init__(self, execute=utils.execute):
        super(TgtAdm, self).__init__('tgtadm', execute)

    def _get_target(self, iqn):
        """Return the tid of the target whose line contains *iqn*, or None."""
        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                parsed = line.split()
                tid = parsed[1]
                # Strip the trailing ':' from "Target N:" output.
                return tid[:-1]

        return None

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a target by writing a persist file and running tgt-admin.

        :returns: the tid assigned by tgtd
        :raises ISCSITargetCreateFailed: when tgt-admin fails
        :raises NotFound: when the target does not appear after update
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        utils.ensure_tree(FLAGS.volumes_dir)

        vol_id = name.split(':')[1]
        # NOTE: restored the <target>...</target> delimiters required by
        # the scsi-target-utils targets.conf format; they were missing.
        if chap_auth is None:
            volume_conf = """
                <target %s>
                    backing-store %s
                </target>
            """ % (name, path)
        else:
            volume_conf = """
                <target %s>
                    backing-store %s
                    %s
                </target>
            """ % (name, path, chap_auth)

        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = FLAGS.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            (out, err) = self._execute('tgt-admin',
                                       '--update',
                                       name,
                                       run_as_root=True)
        except exception.ProcessExecutionError:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s.") % locals())

            #Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") % locals())
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
        """Force-delete the target and remove its persist file."""
        LOG.info(_('Removing iscsi_target for: %s') % vol_id)
        vol_uuid_file = FLAGS.volume_name_template % vol_id
        volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
        if os.path.isfile(volume_path):
            iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
                            vol_uuid_file)
        else:
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        try:
            # NOTE(vish): --force is a workaround for bug:
            #             https://bugs.launchpad.net/cinder/+bug/1159948
            self._execute('tgt-admin',
                          '--force',
                          '--delete',
                          iqn,
                          run_as_root=True)
        except exception.ProcessExecutionError:
            LOG.error(_("Failed to remove iscsi target for volume "
                        "id:%(vol_id)s.") % locals())
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        os.unlink(volume_path)

    def show_target(self, tid, iqn=None, **kwargs):
        """Verify the target for *iqn* exists; raise NotFound otherwise."""
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))

        tid = self._get_target(iqn)
        if tid is None:
            raise exception.NotFound()


class IetAdm(TargetAdmin):
    """iSCSI target administration using ietadm."""

    def __init__(self, execute=utils.execute):
        super(IetAdm, self).__init__('ietadm', execute)

    def _iotype(self, path):
        """Pick blockio/fileio per config, autodetecting when set to auto."""
        if FLAGS.iscsi_iotype == 'auto':
            return 'blockio' if volume_utils.is_block(path) else 'fileio'
        else:
            return FLAGS.iscsi_iotype

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create target + LUN via ietadm and persist them to ietd.conf."""

        # NOTE (jdg): Address bug: 1175207
        kwargs.pop('old_name', None)

        self._new_target(name, tid, **kwargs)
        self._new_logicalunit(tid, lun, path, **kwargs)
        if chap_auth is not None:
            (type, username, password) = chap_auth.split()
            self._new_auth(tid, type, username, password, **kwargs)

        conf_file = FLAGS.iet_conf
        if os.path.exists(conf_file):
            try:
                # NOTE: use '' (not the literal string "None") in the
                # persisted config when no CHAP auth is configured.
                volume_conf = """
                        Target %s
                            %s
                            Lun 0 Path=%s,Type=%s
                """ % (name, chap_auth or '', path, self._iotype(path))

                with utils.temporary_chown(conf_file):
                    f = open(conf_file, 'a+')
                    f.write(volume_conf)
                    f.close()
            except exception.ProcessExecutionError:
                vol_id = name.split(':')[1]
                LOG.error(_("Failed to create iscsi target for volume "
                            "id:%(vol_id)s.") % locals())
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
        """Delete LUN and target, then prune their stanza from ietd.conf."""
        LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
        self._delete_logicalunit(tid, lun, **kwargs)
        self._delete_target(tid, **kwargs)
        vol_uuid_file = FLAGS.volume_name_template % vol_id
        conf_file = FLAGS.iet_conf
        if os.path.exists(conf_file):
            with utils.temporary_chown(conf_file):
                try:
                    iet_conf_text = open(conf_file, 'r+')
                    full_txt = iet_conf_text.readlines()
                    new_iet_conf_txt = []
                    count = 0
                    for line in full_txt:
                        # Skip the matched Target line plus the following
                        # two lines (auth + Lun) written by create.
                        if count > 0:
                            count -= 1
                            continue
                        elif re.search(vol_uuid_file, line):
                            count = 2
                            continue
                        else:
                            new_iet_conf_txt.append(line)

                    iet_conf_text.seek(0)
                    iet_conf_text.truncate(0)
                    iet_conf_text.writelines(new_iet_conf_txt)
                finally:
                    iet_conf_text.close()

    def _new_target(self, name, tid, **kwargs):
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--params', 'Name=%s' % name,
                  **kwargs)

    def _delete_target(self, tid, **kwargs):
        self._run('--op', 'delete',
                  '--tid=%s' % tid,
                  **kwargs)

    def show_target(self, tid, iqn=None, **kwargs):
        self._run('--op', 'show',
                  '--tid=%s' % tid,
                  **kwargs)

    def _new_logicalunit(self, tid, lun, path, **kwargs):
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--lun=%d' % lun,
                  '--params', 'Path=%s,Type=%s' % (path, self._iotype(path)),
                  **kwargs)

    def _delete_logicalunit(self, tid, lun, **kwargs):
        self._run('--op', 'delete',
                  '--tid=%s' % tid,
                  '--lun=%d' % lun,
                  **kwargs)

    def _new_auth(self, tid, type, username, password, **kwargs):
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--user',
                  '--params=%s=%s,Password=%s' % (type, username, password),
                  **kwargs)


class FakeIscsiHelper(object):
    """Test double: hands out monotonically increasing tids, runs nothing."""

    def __init__(self):
        self.tid = 1

    def set_execute(self, execute):
        self._execute = execute

    def create_iscsi_target(self, *args, **kwargs):
        self.tid += 1
        return self.tid


class LioAdm(TargetAdmin):
    """iSCSI target administration for LIO using python-rtslib."""
    def __init__(self, execute=utils.execute):
        super(LioAdm, self).__init__('rtstool', execute)

        try:
            self._execute('rtstool', 'verify')
        except (OSError, exception.ProcessExecutionError):
            LOG.error(_('rtstool is not installed correctly'))
            raise

    def _get_target(self, iqn):
        """Return the rtstool get-targets line containing *iqn*, or None."""
        (out, err) = self._execute('rtstool',
                                   'get-targets',
                                   run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                return line

        return None

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a LIO target via rtstool.

        :returns: the tid reported by rtstool
        :raises ISCSITargetCreateFailed: when rtstool create fails
        :raises NotFound: when the target does not appear afterwards
        """
        # tid and lun are not used

        vol_id = name.split(':')[1]

        LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)

        # rtstool requires chap_auth, but unit tests don't provide it
        chap_auth_userid = 'test_id'
        chap_auth_password = 'test_pass'

        if chap_auth is not None:
            (chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:]

        extra_args = []
        if FLAGS.lio_initiator_iqns:
            extra_args.append(FLAGS.lio_initiator_iqns)

        try:
            command_args = ['rtstool',
                            'create',
                            path,
                            name,
                            chap_auth_userid,
                            chap_auth_password]
            if extra_args:
                command_args += extra_args
            self._execute(*command_args, run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s.") % locals())
            LOG.error("%s" % str(e))

            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s.") % locals())
            raise exception.NotFound()

        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
        """Delete the LIO target for *vol_id* via rtstool."""
        LOG.info(_('Removing iscsi_target: %s') % vol_id)
        vol_uuid_name = 'volume-%s' % vol_id
        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_uuid_name)

        try:
            self._execute('rtstool',
                          'delete',
                          iqn,
                          run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.error(_("Failed to remove iscsi target for volume "
                        "id:%(vol_id)s.") % locals())
            LOG.error("%s" % str(e))
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

    def show_target(self, tid, iqn=None, **kwargs):
        """Verify the target for *iqn* exists; raise NotFound otherwise."""
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))

        tid = self._get_target(iqn)
        if tid is None:
            raise exception.NotFound()

    def initialize_connection(self, volume, connector):
        """Add the connector's initiator IQN to the target's ACL."""
        volume_iqn = volume['provider_location'].split(' ')[1]

        (auth_method, auth_user, auth_pass) = \
            volume['provider_auth'].split(' ', 3)

        # Add initiator iqns to target ACL
        try:
            self._execute('rtstool', 'add-initiator',
                          volume_iqn,
                          auth_user,
                          auth_pass,
                          connector['initiator'],
                          run_as_root=True)
        except exception.ProcessExecutionError:
            LOG.error(_("Failed to add initiator iqn %s to target") %
                      connector['initiator'])
            raise exception.ISCSITargetAttachFailed(volume_id=volume['id'])


def get_target_admin():
    """Return the TargetAdmin implementation selected by FLAGS.iscsi_helper."""
    if FLAGS.iscsi_helper == 'tgtadm':
        return TgtAdm()
    elif FLAGS.iscsi_helper == 'fake':
        return FakeIscsiHelper()
    elif FLAGS.iscsi_helper == 'lioadm':
        return LioAdm()
    else:
        return IetAdm()
shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py new file mode 100644 index 0000000000..e01104427f --- /dev/null +++ b/cinder/brick/local_dev/lvm.py @@ -0,0 +1,368 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +LVM class for performing LVM operations. 
+""" + +import math + +from itertools import izip + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils + +LOG = logging.getLogger(__name__) + + +class VolumeGroupNotFound(Exception): + def __init__(self, vg_name): + message = (_('Unable to find Volume Group: %s') % vg_name) + super(VolumeGroupNotFound, self).__init__(message) + + +class VolumeGroupCreationFailed(Exception): + def __init__(self, vg_name): + message = (_('Failed to create Volume Group: %s') % vg_name) + super(VolumeGroupCreationFailed, self).__init__(message) + + +class LVM(object): + """LVM object to enable various LVM related operations.""" + + def __init__(self, vg_name, create_vg=False, + physical_volumes=None): + """Initialize the LVM object. + + The LVM object is based on an LVM VolumeGroup, one instantiation + for each VolumeGroup you have/use. + + :param vg_name: Name of existing VG or VG to create + :param create_vg: Indicates the VG doesn't exist + and we want to create it + :param physical_volumes: List of PVs to build VG on + + """ + self.vg_name = vg_name + self.pv_list = [] + self.lv_list = [] + self.vg_size = 0 + self.vg_available_space = 0 + self.vg_lv_count = 0 + self.vg_uuid = None + + if create_vg and physical_volumes is not None: + self.pv_list = physical_volumes + + try: + self._create_vg(physical_volumes) + except putils.ProcessExecutionError as err: + LOG.exception(_('Error creating Volume Group')) + LOG.error(_('Cmd :%s') % err.cmd) + LOG.error(_('StdOut :%s') % err.stdout) + LOG.error(_('StdErr :%s') % err.stderr) + raise VolumeGroupCreationFailed(vg_name=self.vg_name) + + if self._vg_exists() is False: + LOG.error(_('Unable to locate Volume Group %s') % vg_name) + raise VolumeGroupNotFound(vg_name=vg_name) + + def _size_str(self, size_in_g): + if '.00' in size_in_g: + size_in_g = size_in_g.replace('.00', '') + + if int(size_in_g) == 0: + return '100M' + + return 
'%sG' % size_in_g + + def _vg_exists(self): + """Simple check to see if VG exists. + + :returns: True if vg specified in object exists, else False + + """ + exists = False + cmd = ['vgs', '--noheadings', '-o', 'name'] + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + if out is not None: + volume_groups = out.split() + if self.vg_name in volume_groups: + exists = True + + return exists + + def _create_vg(self, pv_list): + cmd = ['vgcreate', self.vg_name, ','.join(pv_list)] + putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + def _get_vg_uuid(self): + (out, err) = putils.execute('vgs', '--noheadings', + '-o uuid', self.vg_name) + if out is not None: + return out.split() + else: + return [] + + @staticmethod + def supports_thin_provisioning(): + """Static method to check for thin LVM support on a system. + + :returns: True if supported, False otherwise + + """ + cmd = ['vgs', '--version'] + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + lines = out.split('\n') + + for line in lines: + if 'LVM version' in line: + version_list = line.split() + version = version_list[2] + if '(2)' in version: + version = version.replace('(2)', '') + version_tuple = tuple(map(int, version.split('.'))) + if version_tuple >= (2, 2, 95): + return True + return False + + @staticmethod + def get_all_volumes(vg_name=None): + """Static method to get all LV's on a system. 
+ + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with LV info + + """ + cmd = ['lvs', '--noheadings', '-o', 'vg_name,name,size'] + if vg_name is not None: + cmd += [vg_name] + + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + lv_list = [] + if out is not None: + volumes = out.split() + for vg, name, size in izip(*[iter(volumes)] * 3): + lv_list.append({"vg": vg, "name": name, "size": size}) + + return lv_list + + def get_volumes(self): + """Get all LV's associated with this instantiation (VG). + + :returns: List of Dictionaries with LV info + + """ + self.lv_list = self.get_all_volumes(self.vg_name) + return self.lv_list + + def get_volume(self, name): + """Get reference object of volume specified by name. + + :returns: dict representation of Logical Volume if exists + + """ + ref_list = self.get_volumes() + for r in ref_list: + if r['name'] == name: + return r + + @staticmethod + def get_all_physical_volumes(vg_name=None): + """Static method to get all PVs on a system. + + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with PV info + + """ + cmd = ['pvs', '--noheadings', + '-o', 'vg_name,name,size,free', + '--separator', ':'] + if vg_name is not None: + cmd += [vg_name] + + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + pv_list = [] + if out is not None: + pvs = out.split() + for pv in pvs: + fields = pv.split(':') + pv_list.append({'vg': fields[0], + 'name': fields[1], + 'size': fields[2], + 'available': fields[3]}) + + return pv_list + + def get_physical_volumes(self): + """Get all PVs associated with this instantiation (VG). + + :returns: List of Dictionaries with PV info + + """ + self.pv_list = self.get_all_physical_volumes(self.vg_name) + return self.pv_list + + @staticmethod + def get_all_volume_groups(vg_name=None): + """Static method to get all VGs on a system. 
+ + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with VG info + + """ + cmd = ['vgs', '--noheadings', + '-o', 'name,size,free,lv_count,uuid', + '--separator', ':'] + if vg_name is not None: + cmd += [vg_name] + + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + vg_list = [] + if out is not None: + vgs = out.split() + for vg in vgs: + fields = vg.split(':') + vg_list.append({'name': fields[0], + 'size': fields[1], + 'available': fields[2], + 'lv_count': fields[3], + 'uuid': fields[4]}) + + return vg_list + + def update_volume_group_info(self): + """Update VG info for this instantiation. + + Used to update member fields of object and + provide a dict of info for caller. + + :returns: Dictionaries of VG info + + """ + vg_list = self.get_all_volume_groups(self.vg_name) + + if len(vg_list) != 1: + LOG.error(_('Unable to find VG: %s') % self.vg_name) + raise VolumeGroupNotFound(vg_name=self.vg_name) + + self.vg_size = vg_list[0]['size'] + self.vg_available_space = vg_list[0]['available'] + self.vg_lv_count = vg_list[0]['lv_count'] + self.vg_uuid = vg_list[0]['uuid'] + + return vg_list[0] + + def create_thin_pool(self, name=None, size_str=0): + """Creates a thin provisioning pool for this VG. + + :param name: Name to use for pool, default is "-pool" + :param size_str: Size to allocate for pool, default is entire VG + + """ + + if not self.supports_thin_provisioning(): + LOG.error(_('Requested to setup thin provisioning, ' + 'however current LVM version does not ' + 'support it.')) + return None + + if name is None: + name = '%s-pool' % self.vg_name + + if size_str == 0: + self.update_volume_group_info() + size_str = self.vg_size + + self.create_volume(name, size_str, 'thin') + + def create_volume(self, name, size_str, lv_type='default', mirror_count=0): + """Creates a logical volume on the object's VG. 
+ + :param name: Name to use when creating Logical Volume + :param size_str: Size to use when creating Logical Volume + :param lv_type: Type of Volume (default or thin) + :param mirror_count: Use LVM mirroring with specified count + + """ + size = self._size_str(size_str) + cmd = ['lvcreate', '-n', name, self.vg_name] + if lv_type == 'thin': + cmd += ['-T', '-V', size] + else: + cmd += ['-L', size] + + if mirror_count > 0: + cmd += ['-m', mirror_count, '--nosync'] + terras = int(size[:-1]) / 1024.0 + if terras >= 1.5: + rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) + # NOTE(vish): Next power of two for region size. See: + # http://red.ht/U2BPOD + cmd += ['-R', str(rsize)] + + putils.execute(*cmd, + root_helper='sudo', + run_as_root=True) + + def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): + """Creates a snapshot of a logical volume. + + :param name: Name to assign to new snapshot + :param source_lv_name: Name of Logical Volume to snapshot + :param lv_type: Type of LV (default or thin) + + """ + source_lvref = self.get_volume(source_lv_name) + if source_lvref is None: + LOG.error(_("Unable to find LV: %s") % source_lv_name) + return False + cmd = ['lvcreate', '--name', name, + '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)] + if lv_type != 'thin': + size = source_lvref['size'] + cmd += ['-L', size] + + putils.execute(*cmd, + root_helper='sudo', + run_as_root=True) + + def delete(self, name): + """Delete logical volume or snapshot. + + :param name: Name of LV to delete + + """ + putils.execute('lvremove', + '-f', + '%s/%s' % (self.vg_name, name), + root_helper='sudo', run_as_root=True) + + def revert(self, snapshot_name): + """Revert an LV from snapshot. 
+ + :param snapshot_name: Name of snapshot to revert + + """ + putils.execute('lvconvert', '--merge', + snapshot_name, root_helper='sudo', + run_as_root=True) diff --git a/cinder/common/__init__.py b/cinder/common/__init__.py new file mode 100644 index 0000000000..0a3b98867a --- /dev/null +++ b/cinder/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/common/sqlalchemyutils.py b/cinder/common/sqlalchemyutils.py new file mode 100755 index 0000000000..19b7ca9ea7 --- /dev/null +++ b/cinder/common/sqlalchemyutils.py @@ -0,0 +1,128 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack LLC. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of paginate query."""

import sqlalchemy

from cinder import exception
from cinder.openstack.common import log as logging


LOG = logging.getLogger(__name__)


# copied from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        # NOTE(review): `_` is not imported in this module; presumably it is
        # installed as a builtin by gettext at cinder startup — confirm.
        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))

    # sort_dir and sort_dirs are mutually exclusive.
    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise exception.InvalidInput(reason='Invalid sort key')
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        # Fixed: range instead of the Python-2-only xrange (identical
        # iteration semantics).
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((model_attr > marker_values[i]))
            else:
                raise ValueError(_("Unknown sort direction, "
                                   "must be 'desc' or 'asc'"))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query

# (diff continues: cinder/compute/__init__.py — new empty file;
#  cinder/compute/aggregate_states.py follows)
a/cinder/compute/aggregate_states.py b/cinder/compute/aggregate_states.py new file mode 100644 index 0000000000..92e1940277 --- /dev/null +++ b/cinder/compute/aggregate_states.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible states for host aggregates. + +An aggregate may be 'created', in which case the admin has triggered its +creation, but the underlying hypervisor pool has not actually been set up +yet. An aggregate may be 'changing', meaning that the underlying hypervisor +pool is being set up. An aggregate may be 'active', in which case the underlying +hypervisor pool is up and running. An aggregate may be 'dismissed' when it has +no hosts and it has been deleted. An aggregate may be in 'error' in all other +cases. +A 'created' aggregate becomes 'changing' during the first request of +adding a host. During a 'changing' status no other requests will be accepted; +this is to allow the hypervisor layer to instantiate the underlying pool +without any potential race condition that may incur in master/slave-based +configurations. The aggregate goes into the 'active' state when the underlying +pool has been correctly instantiated. +All other operations (e.g. add/remove hosts) that succeed will keep the +aggregate in the 'active' state. If a number of continuous requests fail, +an 'active' aggregate goes into an 'error' state.
To recover from such a state, +admin intervention is required. Currently an error state is irreversible, +that is, in order to recover from it an aggregate must be deleted. +""" + +CREATED = 'created' +CHANGING = 'changing' +ACTIVE = 'active' +ERROR = 'error' +DISMISSED = 'dismissed' diff --git a/cinder/context.py b/cinder/context.py new file mode 100644 index 0000000000..951496043e --- /dev/null +++ b/cinder/context.py @@ -0,0 +1,155 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""RequestContext: context for requests that persist through all of cinder.""" + +import copy +import uuid + +from cinder.openstack.common import local +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import policy + + +LOG = logging.getLogger(__name__) + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + + +class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system. 
+ + """ + + def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", + roles=None, remote_address=None, timestamp=None, + request_id=None, auth_token=None, overwrite=True, + quota_class=None, **kwargs): + """ + :param read_deleted: 'no' indicates deleted records are hidden, 'yes' + indicates deleted records are visible, 'only' indicates that + *only* deleted records are visible. + + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + if kwargs: + LOG.warn(_('Arguments dropped when creating context: %s') % + str(kwargs)) + + self.user_id = user_id + self.project_id = project_id + self.roles = roles or [] + self.is_admin = is_admin + if self.is_admin is None: + self.is_admin = policy.check_is_admin(self.roles) + elif self.is_admin and 'admin' not in self.roles: + self.roles.append('admin') + self.read_deleted = read_deleted + self.remote_address = remote_address + if not timestamp: + timestamp = timeutils.utcnow() + if isinstance(timestamp, basestring): + timestamp = timeutils.parse_strtime(timestamp) + self.timestamp = timestamp + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + self.auth_token = auth_token + self.quota_class = quota_class + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + + def _get_read_deleted(self): + return self._read_deleted + + def _set_read_deleted(self, read_deleted): + if read_deleted not in ('no', 'yes', 'only'): + raise ValueError(_("read_deleted can only be one of 'no', " + "'yes' or 'only', not %r") % read_deleted) + self._read_deleted = read_deleted + + def _del_read_deleted(self): + del self._read_deleted + + read_deleted = property(_get_read_deleted, _set_read_deleted, + _del_read_deleted) + + def update_store(self): + local.store.context = self + + def 
to_dict(self): + return {'user_id': self.user_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'roles': self.roles, + 'remote_address': self.remote_address, + 'timestamp': timeutils.strtime(self.timestamp), + 'request_id': self.request_id, + 'auth_token': self.auth_token, + 'quota_class': self.quota_class, + 'tenant': self.tenant, + 'user': self.user} + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag set.""" + context = copy.copy(self) + context.is_admin = True + + if 'admin' not in context.roles: + context.roles.append('admin') + + if read_deleted is not None: + context.read_deleted = read_deleted + + return context + + # NOTE(sirp): the openstack/common version of RequestContext uses + # tenant/user whereas the Cinder version uses project_id/user_id. We need + # this shim in order to use context-aware code from openstack/common, like + # logging, until we make the switch to using openstack/common's version of + # RequestContext. + @property + def tenant(self): + return self.project_id + + @property + def user(self): + return self.user_id + + +def get_admin_context(read_deleted="no"): + return RequestContext(user_id=None, + project_id=None, + is_admin=True, + read_deleted=read_deleted, + overwrite=False) diff --git a/cinder/db/__init__.py b/cinder/db/__init__.py new file mode 100644 index 0000000000..f4eb417ec9 --- /dev/null +++ b/cinder/db/__init__.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +DB abstraction for Cinder +""" + +from cinder.db.api import * diff --git a/cinder/db/api.py b/cinder/db/api.py new file mode 100644 index 0000000000..dd546e64b9 --- /dev/null +++ b/cinder/db/api.py @@ -0,0 +1,883 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Defines interface for DB access. + +The underlying driver is loaded as a :class:`LazyPluggable`. + +Functions in this module are imported into the cinder.db namespace. Call these +functions from cinder.db namespace, not the cinder.db.api namespace. + +All functions in this module return objects that implement a dictionary-like +interface. Currently, many of these objects are sqlalchemy objects that +implement a dictionary interface. However, a future goal is to have all of +these objects be simple dictionaries. 
+ + +**Related Flags** + +:db_backend: string to lookup in the list of LazyPluggable backends. + `sqlalchemy` is the only supported backend right now. + +:sql_connection: string specifying the sqlalchemy connection to use, like: + `sqlite:///var/lib/cinder/cinder.sqlite`. + +:enable_new_services: when adding a new service to the database, is it in the + pool of available hardware (Default: True) + +""" + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder import utils + +db_opts = [ + cfg.StrOpt('db_backend', + default='sqlalchemy', + help='The backend to use for db'), + cfg.BoolOpt('enable_new_services', + default=True, + help='Services to be added to the available pool on create'), + cfg.StrOpt('volume_name_template', + default='volume-%s', + help='Template string to be used to generate volume names'), + cfg.StrOpt('share_name_template', + default='share-%s', + help='Template string to be used to generate share names'), + cfg.StrOpt('share_snapshot_name_template', + default='share-snapshot-%s', + help='Template string to be used to generate share snapshot ' + 'names'), + cfg.StrOpt('snapshot_name_template', + default='snapshot-%s', + help='Template string to be used to generate snapshot names'), + cfg.StrOpt('backup_name_template', + default='backup-%s', + help='Template string to be used to generate backup names'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(db_opts) + +IMPL = utils.LazyPluggable('db_backend', + sqlalchemy='cinder.db.sqlalchemy.api') + + +class NoMoreTargets(exception.CinderException): + """No more available targets""" + pass + + +################### + + +def service_destroy(context, service_id): + """Destroy the service or raise if it does not exist.""" + return IMPL.service_destroy(context, service_id) + + +def service_get(context, service_id): + """Get a service or raise if it does not exist.""" + return IMPL.service_get(context, service_id) + + +def service_get_by_host_and_topic(context, 
host, topic): + """Get a service by host it's on and topic it listens to.""" + return IMPL.service_get_by_host_and_topic(context, host, topic) + + +def service_get_all(context, disabled=None): + """Get all services.""" + return IMPL.service_get_all(context, disabled) + + +def service_get_all_by_topic(context, topic): + """Get all services for a given topic.""" + return IMPL.service_get_all_by_topic(context, topic) + + +def service_get_all_by_host(context, host): + """Get all services for a given host.""" + return IMPL.service_get_all_by_host(context, host) + + +def service_get_all_volume_sorted(context): + """Get all volume services sorted by volume count. + + :returns: a list of (Service, volume_count) tuples. + + """ + return IMPL.service_get_all_volume_sorted(context) + + +def service_get_all_share_sorted(context): + """Get all share services sorted by share count. + + :returns: a list of (Service, share_count) tuples. + + """ + return IMPL.service_get_all_share_sorted(context) + + +def service_get_by_args(context, host, binary): + """Get the state of an service by node name and binary.""" + return IMPL.service_get_by_args(context, host, binary) + + +def service_create(context, values): + """Create a service from the values dictionary.""" + return IMPL.service_create(context, values) + + +def service_update(context, service_id, values): + """Set the given properties on an service and update it. + + Raises NotFound if service does not exist. 
+ + """ + return IMPL.service_update(context, service_id, values) + + +################### +def migration_update(context, id, values): + """Update a migration instance.""" + return IMPL.migration_update(context, id, values) + + +def migration_create(context, values): + """Create a migration record.""" + return IMPL.migration_create(context, values) + + +def migration_get(context, migration_id): + """Finds a migration by the id.""" + return IMPL.migration_get(context, migration_id) + + +def migration_get_by_instance_and_status(context, instance_uuid, status): + """Finds a migration by the instance uuid its migrating.""" + return IMPL.migration_get_by_instance_and_status(context, + instance_uuid, + status) + + +def migration_get_all_unconfirmed(context, confirm_window): + """Finds all unconfirmed migrations within the confirmation window.""" + return IMPL.migration_get_all_unconfirmed(context, confirm_window) + + +################### + + +def iscsi_target_count_by_host(context, host): + """Return count of export devices.""" + return IMPL.iscsi_target_count_by_host(context, host) + + +def iscsi_target_create_safe(context, values): + """Create an iscsi_target from the values dictionary. + + The device is not returned. If the create violates the unique + constraints because the iscsi_target and host already exist, + no exception is raised. 
+ + """ + return IMPL.iscsi_target_create_safe(context, values) + + +############### + +def volume_allocate_iscsi_target(context, volume_id, host): + """Atomically allocate a free iscsi_target from the pool.""" + return IMPL.volume_allocate_iscsi_target(context, volume_id, host) + + +def volume_attached(context, volume_id, instance_id, mountpoint): + """Ensure that a volume is set as attached.""" + return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) + + +def volume_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.volume_create(context, values) + + +def volume_data_get_for_host(context, host, session=None): + """Get (volume_count, gigabytes) for project.""" + return IMPL.volume_data_get_for_host(context, + host, + session) + + +def volume_data_get_for_project(context, project_id, session=None): + """Get (volume_count, gigabytes) for project.""" + return IMPL.volume_data_get_for_project(context, + project_id, + session) + + +def volume_destroy(context, volume_id): + """Destroy the volume or raise if it does not exist.""" + return IMPL.volume_destroy(context, volume_id) + + +def volume_detached(context, volume_id): + """Ensure that a volume is set as detached.""" + return IMPL.volume_detached(context, volume_id) + + +def volume_get(context, volume_id): + """Get a volume or raise if it does not exist.""" + return IMPL.volume_get(context, volume_id) + + +def volume_get_all(context, marker, limit, sort_key, sort_dir): + """Get all volumes.""" + return IMPL.volume_get_all(context, marker, limit, sort_key, sort_dir) + + +def volume_get_all_by_host(context, host): + """Get all volumes belonging to a host.""" + return IMPL.volume_get_all_by_host(context, host) + + +def volume_get_all_by_instance_uuid(context, instance_uuid): + """Get all volumes belonging to a instance.""" + return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid) + + +def volume_get_all_by_project(context, project_id, marker, limit, 
sort_key, + sort_dir): + """Get all volumes belonging to a project.""" + return IMPL.volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir) + + +def volume_get_iscsi_target_num(context, volume_id): + """Get the target num (tid) allocated to the volume.""" + return IMPL.volume_get_iscsi_target_num(context, volume_id) + + +def volume_update(context, volume_id, values): + """Set the given properties on an volume and update it. + + Raises NotFound if volume does not exist. + + """ + return IMPL.volume_update(context, volume_id, values) + + +#################### + + +def snapshot_create(context, values): + """Create a snapshot from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Destroy the snapshot or raise if it does not exist.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a snapshot or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all snapshots.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all snapshots belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_get_all_for_volume(context, volume_id): + """Get all snapshots for a volume.""" + return IMPL.snapshot_get_all_for_volume(context, volume_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +def snapshot_data_get_for_project(context, project_id, session=None): + """Get count and gigabytes used for snapshots for specified project.""" + return IMPL.snapshot_data_get_for_project(context, + project_id, + session) + + +def snapshot_get_active_by_window(context, begin, end=None, project_id=None): + """Get all the snapshots inside the window. + + Specifying a project_id will filter for a certain project.""" + return IMPL.snapshot_get_active_by_window(context, begin, end, project_id) + + +#################### + + +def snapshot_metadata_get(context, snapshot_id): + """Get all metadata for a snapshot.""" + return IMPL.snapshot_metadata_get(context, snapshot_id) + + +def snapshot_metadata_delete(context, snapshot_id, key): + """Delete the given metadata item.""" + IMPL.snapshot_metadata_delete(context, snapshot_id, key) + + +def snapshot_metadata_update(context, snapshot_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.snapshot_metadata_update(context, snapshot_id, metadata, delete) + + +#################### + + +def volume_metadata_get(context, volume_id): + """Get all metadata for a volume.""" + return IMPL.volume_metadata_get(context, volume_id) + + +def volume_metadata_delete(context, volume_id, key): + """Delete the given metadata item.""" + IMPL.volume_metadata_delete(context, volume_id, key) + + +def volume_metadata_update(context, volume_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.volume_metadata_update(context, volume_id, metadata, delete) + + +################## + + +def volume_type_create(context, values): + """Create a new volume type.""" + return IMPL.volume_type_create(context, values) + + +def volume_type_get_all(context, inactive=False): + """Get all volume types.""" + return IMPL.volume_type_get_all(context, inactive) + + +def volume_type_get(context, id): + """Get volume type by id.""" + 
return IMPL.volume_type_get(context, id) + + +def volume_type_get_by_name(context, name): + """Get volume type by name.""" + return IMPL.volume_type_get_by_name(context, name) + + +def volume_type_destroy(context, id): + """Delete a volume type.""" + return IMPL.volume_type_destroy(context, id) + + +def volume_get_active_by_window(context, begin, end=None, project_id=None): + """Get all the volumes inside the window. + + Specifying a project_id will filter for a certain project.""" + return IMPL.volume_get_active_by_window(context, begin, end, project_id) + + +#################### + + +def volume_type_extra_specs_get(context, volume_type_id): + """Get all extra specs for a volume type.""" + return IMPL.volume_type_extra_specs_get(context, volume_type_id) + + +def volume_type_extra_specs_delete(context, volume_type_id, key): + """Delete the given extra specs item.""" + IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) + + +def volume_type_extra_specs_update_or_create(context, + volume_type_id, + extra_specs): + """Create or update volume type extra specs. 
This adds or modifies the + key/value pairs specified in the extra specs dict argument""" + IMPL.volume_type_extra_specs_update_or_create(context, + volume_type_id, + extra_specs) + + +################### + + +def volume_glance_metadata_create(context, volume_id, key, value): + """Update the Glance metadata for the specified volume.""" + return IMPL.volume_glance_metadata_create(context, + volume_id, + key, + value) + + +def volume_glance_metadata_get(context, volume_id): + """Return the glance metadata for a volume.""" + return IMPL.volume_glance_metadata_get(context, volume_id) + + +def volume_snapshot_glance_metadata_get(context, snapshot_id): + """Return the Glance metadata for the specified snapshot.""" + return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id) + + +def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): + """ + Update the Glance metadata for a snapshot by copying all of the key:value + pairs from the originating volume. This is so that a volume created from + the snapshot will retain the original metadata. + """ + return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, + volume_id) + + +def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): + """ + Update the Glance metadata from a volume (created from a snapshot) by + copying all of the key:value pairs from the originating snapshot. This is + so that the Glance metadata from the original volume is retained. 
+ """ + return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id, + snapshot_id) + + +def volume_glance_metadata_delete_by_volume(context, volume_id): + """Delete the glance metadata for a volume.""" + return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id) + + +def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): + """Delete the glance metadata for a snapshot.""" + return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) + + +def volume_glance_metadata_copy_from_volume_to_volume(context, + src_volume_id, + volume_id): + """ + Update the Glance metadata for a volume by copying all of the key:value + pairs from the originating volume. This is so that a volume created from + the volume (clone) will retain the original metadata. + """ + return IMPL.volume_glance_metadata_copy_from_volume_to_volume( + context, + src_volume_id, + volume_id) + +################### + + +def sm_backend_conf_create(context, values): + """Create a new SM Backend Config entry.""" + return IMPL.sm_backend_conf_create(context, values) + + +def sm_backend_conf_update(context, sm_backend_conf_id, values): + """Update a SM Backend Config entry.""" + return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values) + + +def sm_backend_conf_delete(context, sm_backend_conf_id): + """Delete a SM Backend Config.""" + return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id) + + +def sm_backend_conf_get(context, sm_backend_conf_id): + """Get a specific SM Backend Config.""" + return IMPL.sm_backend_conf_get(context, sm_backend_conf_id) + + +def sm_backend_conf_get_by_sr(context, sr_uuid): + """Get a specific SM Backend Config.""" + return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid) + + +def sm_backend_conf_get_all(context): + """Get all SM Backend Configs.""" + return IMPL.sm_backend_conf_get_all(context) + + +#################### + + +def sm_flavor_create(context, values): + """Create a new SM Flavor entry.""" + return 
IMPL.sm_flavor_create(context, values)
+
+
+def sm_flavor_update(context, sm_flavor_id, values):
+    """Update a SM Flavor entry.
+
+    :param sm_flavor_id: id of the flavor to update (forwarded to IMPL so
+        the backend updates the correct row).
+    """
+    # BUG FIX: sm_flavor_id was accepted but never forwarded to IMPL,
+    # so the backend had no way to know which flavor to update.
+    return IMPL.sm_flavor_update(context, sm_flavor_id, values)
+
+
+def sm_flavor_delete(context, sm_flavor_id):
+    """Delete a SM Flavor."""
+    return IMPL.sm_flavor_delete(context, sm_flavor_id)
+
+
+def sm_flavor_get(context, sm_flavor):
+    """Get a specific SM Flavor."""
+    return IMPL.sm_flavor_get(context, sm_flavor)
+
+
+def sm_flavor_get_all(context):
+    """Get all SM Flavors."""
+    return IMPL.sm_flavor_get_all(context)
+
+
+####################
+
+
+def sm_volume_create(context, values):
+    """Create a new child Zone entry."""
+    return IMPL.sm_volume_create(context, values)
+
+
+def sm_volume_update(context, volume_id, values):
+    """Update a child Zone entry.
+
+    :param volume_id: id of the volume to update (forwarded to IMPL so
+        the backend updates the correct row).
+    """
+    # BUG FIX: volume_id was accepted but never forwarded to IMPL,
+    # mirroring the sm_flavor_update defect above.
+    return IMPL.sm_volume_update(context, volume_id, values)
+
+
+def sm_volume_delete(context, volume_id):
+    """Delete a child Zone."""
+    return IMPL.sm_volume_delete(context, volume_id)
+
+
+def sm_volume_get(context, volume_id):
+    """Get a specific child Zone."""
+    return IMPL.sm_volume_get(context, volume_id)
+
+
+def sm_volume_get_all(context):
+    """Get all child Zones."""
+    return IMPL.sm_volume_get_all(context)
+
+###################
+
+
+def quota_create(context, project_id, resource, limit):
+    """Create a quota for the given project and resource."""
+    return IMPL.quota_create(context, project_id, resource, limit)
+
+
+def quota_get(context, project_id, resource):
+    """Retrieve a quota or raise if it does not exist."""
+    return IMPL.quota_get(context, project_id, resource)
+
+
+def quota_get_all_by_project(context, project_id):
+    """Retrieve all quotas associated with a given project."""
+    return IMPL.quota_get_all_by_project(context, project_id)
+
+
+def quota_update(context, project_id, resource, limit):
+    """Update a quota or raise if it does not exist."""
+    return IMPL.quota_update(context, project_id, resource, limit)
+
+
+def quota_destroy(context, project_id, resource):
+    """Destroy the 
quota or raise if it does not exist.""" + return IMPL.quota_destroy(context, project_id, resource) + + +################### + + +def quota_class_create(context, class_name, resource, limit): + """Create a quota class for the given name and resource.""" + return IMPL.quota_class_create(context, class_name, resource, limit) + + +def quota_class_get(context, class_name, resource): + """Retrieve a quota class or raise if it does not exist.""" + return IMPL.quota_class_get(context, class_name, resource) + + +def quota_class_get_all_by_name(context, class_name): + """Retrieve all quotas associated with a given quota class.""" + return IMPL.quota_class_get_all_by_name(context, class_name) + + +def quota_class_update(context, class_name, resource, limit): + """Update a quota class or raise if it does not exist.""" + return IMPL.quota_class_update(context, class_name, resource, limit) + + +def quota_class_destroy(context, class_name, resource): + """Destroy the quota class or raise if it does not exist.""" + return IMPL.quota_class_destroy(context, class_name, resource) + + +def quota_class_destroy_all_by_name(context, class_name): + """Destroy all quotas associated with a given quota class.""" + return IMPL.quota_class_destroy_all_by_name(context, class_name) + + +################### + + +def quota_usage_create(context, project_id, resource, in_use, reserved, + until_refresh): + """Create a quota usage for the given project and resource.""" + return IMPL.quota_usage_create(context, project_id, resource, + in_use, reserved, until_refresh) + + +def quota_usage_get(context, project_id, resource): + """Retrieve a quota usage or raise if it does not exist.""" + return IMPL.quota_usage_get(context, project_id, resource) + + +def quota_usage_get_all_by_project(context, project_id): + """Retrieve all usage associated with a given resource.""" + return IMPL.quota_usage_get_all_by_project(context, project_id) + + +################### + + +def reservation_create(context, uuid, usage, 
project_id, resource, delta, + expire): + """Create a reservation for the given project and resource.""" + return IMPL.reservation_create(context, uuid, usage, project_id, + resource, delta, expire) + + +def reservation_get(context, uuid): + """Retrieve a reservation or raise if it does not exist.""" + return IMPL.reservation_get(context, uuid) + + +def reservation_get_all_by_project(context, project_id): + """Retrieve all reservations associated with a given project.""" + return IMPL.reservation_get_all_by_project(context, project_id) + + +def reservation_destroy(context, uuid): + """Destroy the reservation or raise if it does not exist.""" + return IMPL.reservation_destroy(context, uuid) + + +################### + + +def quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age, project_id=None): + """Check quotas and create appropriate reservations.""" + return IMPL.quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age, project_id=project_id) + + +def reservation_commit(context, reservations, project_id=None): + """Commit quota reservations.""" + return IMPL.reservation_commit(context, reservations, + project_id=project_id) + + +def reservation_rollback(context, reservations, project_id=None): + """Roll back quota reservations.""" + return IMPL.reservation_rollback(context, reservations, + project_id=project_id) + + +def quota_destroy_all_by_project(context, project_id): + """Destroy all quotas associated with a given project.""" + return IMPL.quota_destroy_all_by_project(context, project_id) + + +def reservation_expire(context): + """Roll back any expired reservations.""" + return IMPL.reservation_expire(context) + + +################### + + +def backup_get(context, backup_id): + """Get a backup or raise if it does not exist.""" + return IMPL.backup_get(context, backup_id) + + +def backup_get_all(context): + """Get all backups.""" + return IMPL.backup_get_all(context) + + +def backup_get_all_by_host(context, 
host): + """Get all backups belonging to a host.""" + return IMPL.backup_get_all_by_host(context, host) + + +def backup_create(context, values): + """Create a backup from the values dictionary.""" + return IMPL.backup_create(context, values) + + +def backup_get_all_by_project(context, project_id): + """Get all backups belonging to a project.""" + return IMPL.backup_get_all_by_project(context, project_id) + + +def backup_update(context, backup_id, values): + """ + Set the given properties on a backup and update it. + + Raises NotFound if backup does not exist. + """ + return IMPL.backup_update(context, backup_id, values) + + +def backup_destroy(context, backup_id): + """Destroy the backup or raise if it does not exist.""" + return IMPL.backup_destroy(context, backup_id) + + +#################### + + +def share_create(context, values): + """Create new share.""" + return IMPL.share_create(context, values) + + +def share_update(context, share_id, values): + """Update share fields.""" + return IMPL.share_update(context, share_id, values) + + +def share_get(context, share_id): + """Get share by id.""" + return IMPL.share_get(context, share_id) + + +def share_get_all(context): + """Get all shares.""" + return IMPL.share_get_all(context) + + +def share_get_all_by_host(context, host): + """Returns all shares with given host.""" + return IMPL.share_get_all_by_host(context, host) + + +def share_get_all_by_project(context, project_id): + """Returns all shares with given project ID.""" + return IMPL.share_get_all_by_project(context, project_id) + + +def share_delete(context, share_id): + """Delete share.""" + return IMPL.share_delete(context, share_id) + + +################### + + +def share_access_create(context, values): + """Allow access to share.""" + return IMPL.share_access_create(context, values) + + +def share_access_get(context, access_id): + """Allow access to share.""" + return IMPL.share_access_get(context, access_id) + + +def share_access_get_all_for_share(context, 
share_id): + """Allow access to share.""" + return IMPL.share_access_get_all_for_share(context, share_id) + + +def share_access_delete(context, access_id): + """Deny access to share.""" + return IMPL.share_access_delete(context, access_id) + + +def share_access_update(context, access_id, values): + """Update access record.""" + return IMPL.share_access_update(context, access_id, values) + + +#################### + + +def share_snapshot_create(context, values): + """Create a snapshot from the values dictionary.""" + return IMPL.share_snapshot_create(context, values) + + +def share_snapshot_destroy(context, snapshot_id): + """Destroy the snapshot or raise if it does not exist.""" + return IMPL.share_snapshot_destroy(context, snapshot_id) + + +def share_snapshot_get(context, snapshot_id): + """Get a snapshot or raise if it does not exist.""" + return IMPL.share_snapshot_get(context, snapshot_id) + + +def share_snapshot_get_all(context): + """Get all snapshots.""" + return IMPL.share_snapshot_get_all(context) + + +def share_snapshot_get_all_by_project(context, project_id): + """Get all snapshots belonging to a project.""" + return IMPL.share_snapshot_get_all_by_project(context, project_id) + + +def share_snapshot_get_all_for_share(context, share_id): + """Get all snapshots for a share.""" + return IMPL.share_snapshot_get_all_for_share(context, share_id) + + +def share_snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+    """
+    return IMPL.share_snapshot_update(context, snapshot_id, values)
+
+
+def share_snapshot_data_get_for_project(context, project_id, session=None):
+    """Get count and gigabytes used for snapshots for specified project."""
+    # BUG FIX: previously forwarded the literal ``session=None``, silently
+    # discarding any caller-supplied session (compare with
+    # snapshot_data_get_for_project, which forwards its session argument).
+    return IMPL.share_snapshot_data_get_for_project(context,
+                                                    project_id,
+                                                    session=session)
+
+
+####################
diff --git a/cinder/db/base.py b/cinder/db/base.py
new file mode 100644
index 0000000000..edfbeb626c
--- /dev/null
+++ b/cinder/db/base.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License. 
+ +"""Base class for classes that need modular database access.""" + +from oslo.config import cfg + +from cinder import flags +from cinder.openstack.common import importutils + +db_driver_opt = cfg.StrOpt('db_driver', + default='cinder.db', + help='driver to use for database access') + +FLAGS = flags.FLAGS +FLAGS.register_opt(db_driver_opt) + + +class Base(object): + """DB driver is injected in the init method.""" + + def __init__(self, db_driver=None): + if not db_driver: + db_driver = FLAGS.db_driver + self.db = importutils.import_module(db_driver) # pylint: disable=C0103 diff --git a/cinder/db/migration.py b/cinder/db/migration.py new file mode 100644 index 0000000000..6079055e50 --- /dev/null +++ b/cinder/db/migration.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Database setup and migration commands.""" + +from cinder import utils + + +IMPL = utils.LazyPluggable('db_backend', + sqlalchemy='cinder.db.sqlalchemy.migration') + + +INIT_VERSION = 000 + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/cinder/db/sqlalchemy/__init__.py b/cinder/db/sqlalchemy/__init__.py new file mode 100644 index 0000000000..747015af53 --- /dev/null +++ b/cinder/db/sqlalchemy/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py new file mode 100644 index 0000000000..bc0c6c2bc1 --- /dev/null +++ b/cinder/db/sqlalchemy/api.py @@ -0,0 +1,2243 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of SQLAlchemy backend.""" + +import datetime +import uuid +import warnings + +from sqlalchemy.exc import IntegrityError +from sqlalchemy import or_ +from sqlalchemy.orm import joinedload +from sqlalchemy.sql.expression import literal_column +from sqlalchemy.sql import func + +from cinder.common import sqlalchemyutils +from cinder import db +from cinder.db.sqlalchemy import models +from cinder.db.sqlalchemy.session import get_session +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder.openstack.common import uuidutils + + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +def is_admin_context(context): + """Indicates if the request context is an administrator.""" + if not context: + warnings.warn(_('Use of empty request context is deprecated'), + DeprecationWarning) + raise Exception('die') + return context.is_admin + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def authorize_project_context(context, project_id): + """Ensures a request has permission to access the given project.""" + if is_user_context(context): + if not context.project_id: + raise exception.NotAuthorized() + elif context.project_id != project_id: + raise exception.NotAuthorized() + + +def authorize_user_context(context, user_id): + """Ensures a 
request has permission to access the given user.""" + if is_user_context(context): + if not context.user_id: + raise exception.NotAuthorized() + elif context.user_id != user_id: + raise exception.NotAuthorized() + + +def authorize_quota_class_context(context, class_name): + """Ensures a request has permission to access the given quota class.""" + if is_user_context(context): + if not context.quota_class: + raise exception.NotAuthorized() + elif context.quota_class != class_name: + raise exception.NotAuthorized() + + +def require_admin_context(f): + """Decorator to require admin request context. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]): + raise exception.AdminRequired() + return f(*args, **kwargs) + return wrapper + + +def require_context(f): + """Decorator to require *any* user or admin context. + + This does no authorization for user or project access matching, see + :py:func:`authorize_project_context` and + :py:func:`authorize_user_context`. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]) and not is_user_context(args[0]): + raise exception.NotAuthorized() + return f(*args, **kwargs) + return wrapper + + +def require_volume_exists(f): + """Decorator to require the specified volume to exist. + + Requires the wrapped function to use context and volume_id as + their first two arguments. + """ + + def wrapper(context, volume_id, *args, **kwargs): + db.volume_get(context, volume_id) + return f(context, volume_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def require_snapshot_exists(f): + """Decorator to require the specified snapshot to exist. + + Requires the wrapped function to use context and snapshot_id as + their first two arguments. 
+ """ + + def wrapper(context, snapshot_id, *args, **kwargs): + db.api.snapshot_get(context, snapshot_id) + return f(context, snapshot_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def model_query(context, *args, **kwargs): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + :param session: if present, the session to use + :param read_deleted: if present, overrides context's read_deleted field. + :param project_only: if present and context is user-type, then restrict + query to match the context's project_id. + """ + session = kwargs.get('session') or get_session() + read_deleted = kwargs.get('read_deleted') or context.read_deleted + project_only = kwargs.get('project_only') + + query = session.query(*args) + + if read_deleted == 'no': + query = query.filter_by(deleted=False) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter_by(deleted=True) + else: + raise Exception( + _("Unrecognized read_deleted value '%s'") % read_deleted) + + if project_only and is_user_context(context): + query = query.filter_by(project_id=context.project_id) + + return query + + +def exact_filter(query, model, filters, legal_keys): + """Applies exact match filtering to a query. + + Returns the updated query. Modifies filters argument to remove + filters consumed. 
+ + :param query: query to apply filters to + :param model: model object the query applies to, for IN-style + filtering + :param filters: dictionary of filters; values that are lists, + tuples, sets, or frozensets cause an 'IN' test to + be performed, while exact matching ('==' operator) + is used for other values + :param legal_keys: list of keys to apply exact filtering to + """ + + filter_dict = {} + + # Walk through all the keys + for key in legal_keys: + # Skip ones we're not filtering on + if key not in filters: + continue + + # OK, filtering on this key; what value do we search for? + value = filters.pop(key) + + if isinstance(value, (list, tuple, set, frozenset)): + # Looking for values in a list; apply to query directly + column_attr = getattr(model, key) + query = query.filter(column_attr.in_(value)) + else: + # OK, simple exact match; save for later + filter_dict[key] = value + + # Apply simple exact matches + if filter_dict: + query = query.filter_by(**filter_dict) + + return query + + +################### + + +@require_admin_context +def service_destroy(context, service_id): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.delete(session=session) + + +@require_admin_context +def service_get(context, service_id, session=None): + result = model_query( + context, + models.Service, + session=session).\ + filter_by(id=service_id).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=service_id) + + return result + + +@require_admin_context +def service_get_all(context, disabled=None): + query = model_query(context, models.Service) + + if disabled is not None: + query = query.filter_by(disabled=disabled) + + return query.all() + + +@require_admin_context +def service_get_all_by_topic(context, topic): + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(topic=topic).\ + all() + + 
+@require_admin_context +def service_get_by_host_and_topic(context, host, topic): + result = model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=None) + return result + + +@require_admin_context +def service_get_all_by_host(context, host): + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(host=host).\ + all() + + +@require_admin_context +def _service_get_all_topic_subquery(context, session, topic, subq, label): + sort_value = getattr(subq.c, label) + return model_query(context, models.Service, + func.coalesce(sort_value, 0), + session=session, read_deleted="no").\ + filter_by(topic=topic).\ + filter_by(disabled=False).\ + outerjoin((subq, models.Service.host == subq.c.host)).\ + order_by(sort_value).\ + all() + + +@require_admin_context +def service_get_all_volume_sorted(context): + session = get_session() + with session.begin(): + topic = FLAGS.volume_topic + label = 'volume_gigabytes' + subq = model_query(context, models.Volume.host, + func.sum(models.Volume.size).label(label), + session=session, read_deleted="no").\ + group_by(models.Volume.host).\ + subquery() + return _service_get_all_topic_subquery(context, + session, + topic, + subq, + label) + + +@require_admin_context +def service_get_all_share_sorted(context): + session = get_session() + with session.begin(): + topic = FLAGS.share_topic + label = 'share_gigabytes' + subq = model_query(context, models.Share.host, + func.sum(models.Share.size).label(label), + session=session, read_deleted="no").\ + group_by(models.Share.host).\ + subquery() + return _service_get_all_topic_subquery(context, + session, + topic, + subq, + label) + + +@require_admin_context +def service_get_by_args(context, host, binary): + result = model_query(context, models.Service).\ + filter_by(host=host).\ + filter_by(binary=binary).\ + first() + 
+ if not result: + raise exception.HostBinaryNotFound(host=host, binary=binary) + + return result + + +@require_admin_context +def service_create(context, values): + service_ref = models.Service() + service_ref.update(values) + if not FLAGS.enable_new_services: + service_ref.disabled = True + service_ref.save() + return service_ref + + +@require_admin_context +def service_update(context, service_id, values): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.update(values) + service_ref.save(session=session) + + +################### + + +def _metadata_refs(metadata_dict, meta_class): + metadata_refs = [] + if metadata_dict: + for k, v in metadata_dict.iteritems(): + metadata_ref = meta_class() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs + + +def _dict_with_extra_specs(inst_type_query): + """Takes an instance, volume, or instance type query returned + by sqlalchemy and returns it as a dictionary, converting the + extra_specs entry from a list of dicts: + + 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
+ + to a single dict: + + 'extra_specs' : {'k1': 'v1'} + + """ + inst_type_dict = dict(inst_type_query) + extra_specs = dict([(x['key'], x['value']) + for x in inst_type_query['extra_specs']]) + inst_type_dict['extra_specs'] = extra_specs + return inst_type_dict + + +################### + + +@require_admin_context +def iscsi_target_count_by_host(context, host): + return model_query(context, models.IscsiTarget).\ + filter_by(host=host).\ + count() + + +@require_admin_context +def iscsi_target_create_safe(context, values): + iscsi_target_ref = models.IscsiTarget() + + for (key, value) in values.iteritems(): + iscsi_target_ref[key] = value + try: + iscsi_target_ref.save() + return iscsi_target_ref + except IntegrityError: + return None + + +################### + + +@require_context +def quota_get(context, project_id, resource, session=None): + result = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.ProjectQuotaNotFound(project_id=project_id) + + return result + + +@require_context +def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.Quota, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_create(context, project_id, resource, limit): + quota_ref = models.Quota() + quota_ref.project_id = project_id + quota_ref.resource = resource + quota_ref.hard_limit = limit + quota_ref.save() + return quota_ref + + +@require_admin_context +def quota_update(context, project_id, resource, limit): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.hard_limit = limit + quota_ref.save(session=session) + + 
+@require_admin_context +def quota_destroy(context, project_id, resource): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.delete(session=session) + + +################### + + +@require_context +def quota_class_get(context, class_name, resource, session=None): + result = model_query(context, models.QuotaClass, session=session, + read_deleted="no").\ + filter_by(class_name=class_name).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.QuotaClassNotFound(class_name=class_name) + + return result + + +@require_context +def quota_class_get_all_by_name(context, class_name): + authorize_quota_class_context(context, class_name) + + rows = model_query(context, models.QuotaClass, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + result = {'class_name': class_name} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_class_create(context, class_name, resource, limit): + quota_class_ref = models.QuotaClass() + quota_class_ref.class_name = class_name + quota_class_ref.resource = resource + quota_class_ref.hard_limit = limit + quota_class_ref.save() + return quota_class_ref + + +@require_admin_context +def quota_class_update(context, class_name, resource, limit): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.hard_limit = limit + quota_class_ref.save(session=session) + + +@require_admin_context +def quota_class_destroy(context, class_name, resource): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.delete(session=session) + + +@require_admin_context +def quota_class_destroy_all_by_name(context, class_name): + session = get_session() + with session.begin(): + quota_classes = 
###################


@require_context
def quota_usage_get(context, project_id, resource, session=None):
    """Return the usage row for one (project, resource) pair.

    Raises QuotaUsageNotFound when no live row exists.
    """
    query = model_query(context, models.QuotaUsage, session=session,
                        read_deleted="no")
    query = query.filter_by(project_id=project_id)
    row = query.filter_by(resource=resource).first()

    if not row:
        raise exception.QuotaUsageNotFound(project_id=project_id)

    return row


@require_context
def quota_usage_get_all_by_project(context, project_id):
    """Return {'project_id': ..., <resource>: {'in_use': n, 'reserved': m}}."""
    authorize_project_context(context, project_id)

    query = model_query(context, models.QuotaUsage, read_deleted="no")
    rows = query.filter_by(project_id=project_id).all()

    usages = {'project_id': project_id}
    for row in rows:
        usages[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)

    return usages


@require_admin_context
def quota_usage_create(context, project_id, resource, in_use, reserved,
                       until_refresh, session=None):
    """Insert a new usage row and return it."""
    usage_ref = models.QuotaUsage()
    usage_ref.project_id = project_id
    usage_ref.resource = resource
    usage_ref.in_use = in_use
    usage_ref.reserved = reserved
    usage_ref.until_refresh = until_refresh
    usage_ref.save(session=session)

    return usage_ref


###################


@require_context
def reservation_get(context, uuid, session=None):
    """Return one reservation by its UUID or raise ReservationNotFound."""
    query = model_query(context, models.Reservation, session=session,
                        read_deleted="no")
    row = query.filter_by(uuid=uuid).first()

    if not row:
        raise exception.ReservationNotFound(uuid=uuid)

    return row


@require_context
def reservation_get_all_by_project(context, project_id):
    """Return {'project_id': ..., <resource>: {<uuid>: delta, ...}}."""
    authorize_project_context(context, project_id)

    query = model_query(context, models.Reservation, read_deleted="no")
    rows = query.filter_by(project_id=project_id).all()

    result = {'project_id': project_id}
    for row in rows:
        result.setdefault(row.resource, {})
        result[row.resource][row.uuid] = row.delta

    return result
@require_admin_context
def reservation_create(context, uuid, usage, project_id, resource, delta,
                       expire, session=None):
    """Insert a reservation row tied to a usage record and return it."""
    reservation_ref = models.Reservation()
    reservation_ref.uuid = uuid
    reservation_ref.usage_id = usage['id']
    reservation_ref.project_id = project_id
    reservation_ref.resource = resource
    reservation_ref.delta = delta
    reservation_ref.expire = expire
    reservation_ref.save(session=session)
    return reservation_ref


@require_admin_context
def reservation_destroy(context, uuid):
    """Soft-delete one reservation by UUID."""
    session = get_session()
    with session.begin():
        reservation_ref = reservation_get(context, uuid, session=session)
        reservation_ref.delete(session=session)


###################


# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.

def _get_quota_usages(context, session, project_id):
    """Return the project's usage rows, locked FOR UPDATE, keyed by resource.

    Broken out for testability.
    """
    rows = model_query(context, models.QuotaUsage,
                       read_deleted="no",
                       session=session).\
        filter_by(project_id=project_id).\
        with_lockmode('update').\
        all()
    return dict((row.resource, row) for row in rows)


@require_context
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None):
    """Reserve quota deltas, refreshing stale usage records as needed.

    Returns a list of reservation UUIDs.  Raises OverQuota when any
    positive delta would exceed its quota; logs a warning when negative
    deltas would drive in_use below zero.
    """
    elevated = context.elevated()
    session = get_session()
    with session.begin():
        if project_id is None:
            project_id = context.project_id

        # Get the current usages (locked FOR UPDATE)
        usages = _get_quota_usages(context, session, project_id)

        # Handle usage refresh
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Do we need to refresh the usage?
            refresh = False
            if resource not in usages:
                usages[resource] = quota_usage_create(elevated,
                                                      project_id,
                                                      resource,
                                                      0, 0,
                                                      until_refresh or None,
                                                      session=session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync, so try to
                # heal from that...
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and (timeutils.utcnow() -
                              usages[resource].updated_at).seconds >= max_age:
                # BUGFIX: operands were reversed (updated_at - utcnow()),
                # producing a negative timedelta whose normalized .seconds
                # wraps to ~86400-x and defeats the max_age check.  Age is
                # "now minus last update".  (NOTE: .seconds still ignores
                # whole days, so ages beyond 24h wrap; kept for parity
                # with the upstream fix.)
                refresh = True

            # OK, refresh the usage
            if refresh:
                # Grab the sync routine
                sync = resources[resource].sync

                updates = sync(elevated, project_id, session)
                for res, in_use in updates.items():
                    # Make sure we have a destination for the usage!
                    if res not in usages:
                        usages[res] = quota_usage_create(elevated,
                                                         project_id,
                                                         res,
                                                         0, 0,
                                                         until_refresh or None,
                                                         session=session)

                    # Update the usage
                    usages[res].in_use = in_use
                    usages[res].until_refresh = until_refresh or None

                    # Because more than one resource may be refreshed
                    # by the call to the sync routine, and we don't
                    # want to double-sync, we make sure all refreshed
                    # resources are dropped from the work set.
                    work.discard(res)

                    # NOTE(Vek): We make the assumption that the sync
                    #            routine actually refreshes the
                    #            resources that it is the sync routine
                    #            for.  We don't check, because this is
                    #            a best-effort mechanism.

        # Check for deltas that would go negative
        unders = [resource for resource, delta in deltas.items()
                  if delta < 0 and
                  delta + usages[resource].in_use < 0]

        # Now, let's check the quotas
        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any
        #            problems.
        overs = [resource for resource, delta in deltas.items()
                 if quotas[resource] >= 0 and delta >= 0 and
                 quotas[resource] < delta + usages[resource].total]

        # NOTE(Vek): The quota check needs to be in the transaction,
        #            but the transaction doesn't fail just because
        #            we're over quota, so the OverQuota raise is
        #            outside the transaction.  If we did the raise
        #            here, our usage updates would be discarded, but
        #            they're not invalidated by being over-quota.

        # Create the reservations
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                reservation = reservation_create(elevated,
                                                 str(uuid.uuid4()),
                                                 usages[resource],
                                                 project_id,
                                                 resource, delta, expire,
                                                 session=session)
                reservations.append(reservation.uuid)

                # Also update the reserved quantity
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here, though, we're
                #            worried about the following scenario:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0:
                    usages[resource].reserved += delta

        # Apply updates to the usages table
        for usage_ref in usages.values():
            usage_ref.save(session=session)

    if unders:
        LOG.warning(_("Change will make usage less than 0 for the following "
                      "resources: %(unders)s") % locals())
    if overs:
        usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
                      for k, v in usages.items())
        raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                  usages=usages)

    return reservations


def _quota_reservations(session, context, reservations):
    """Return the listed reservations, locked FOR UPDATE."""
    return model_query(context, models.Reservation,
                       read_deleted="no",
                       session=session).\
        filter(models.Reservation.uuid.in_(reservations)).\
        with_lockmode('update').\
        all()


@require_context
def reservation_commit(context, reservations, project_id=None):
    """Apply reservations: move reserved quantities into in_use."""
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(context, session, project_id)

        for reservation in _quota_reservations(session, context, reservations):
            usage = usages[reservation.resource]
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta
            usage.in_use += reservation.delta

            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)


@require_context
def reservation_rollback(context, reservations, project_id=None):
    """Discard reservations, returning positive deltas to reserved."""
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(context, session, project_id)

        for reservation in _quota_reservations(session, context, reservations):
            usage = usages[reservation.resource]
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta

            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)


@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    """Soft-delete all quota, usage and reservation rows for a project."""
    session = get_session()
    with session.begin():
        quotas = model_query(context, models.Quota, session=session,
                             read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_ref in quotas:
            quota_ref.delete(session=session)

        quota_usages = model_query(context, models.QuotaUsage,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_usage_ref in quota_usages:
            quota_usage_ref.delete(session=session)

        reservations = model_query(context, models.Reservation,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for reservation_ref in reservations:
            reservation_ref.delete(session=session)
@require_admin_context
def reservation_expire(context):
    """Purge reservations past their expiry, returning reserved quota."""
    session = get_session()
    with session.begin():
        now = timeutils.utcnow()
        query = model_query(context, models.Reservation, session=session,
                            read_deleted="no")
        expired = query.filter(models.Reservation.expire < now).all()

        if expired:
            for reservation in expired:
                if reservation.delta >= 0:
                    reservation.usage.reserved -= reservation.delta
                    reservation.usage.save(session=session)

                reservation.delete(session=session)


###################


@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
    """Atomically claim a free iSCSI target on *host* for a volume.

    Returns the claimed target number; raises NoMoreTargets when the
    host has no unassigned targets left.
    """
    session = get_session()
    with session.begin():
        target = model_query(context, models.IscsiTarget,
                             session=session, read_deleted="no").\
            filter_by(volume=None).\
            filter_by(host=host).\
            with_lockmode('update').\
            first()

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not target:
            raise db.NoMoreTargets()

        target.volume_id = volume_id
        session.add(target)

        return target.target_num


@require_admin_context
def volume_attached(context, volume_id, instance_uuid, mountpoint):
    """Mark a volume as attached to the given instance."""
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        volume = volume_get(context, volume_id, session=session)
        volume['status'] = 'in-use'
        volume['mountpoint'] = mountpoint
        volume['attach_status'] = 'attached'
        volume['instance_uuid'] = instance_uuid
        volume.save(session=session)
@require_context
def volume_create(context, values):
    """Create a volume row (plus its metadata rows) and return it."""
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)

    volume = models.Volume()
    volume.update(values)

    session = get_session()
    with session.begin():
        volume.save(session=session)

    return volume_get(context, values['id'], session=session)


@require_admin_context
def volume_data_get_for_host(context, host, session=None):
    """Return (volume count, total GiB) for one host."""
    query = model_query(context,
                        func.count(models.Volume.id),
                        func.sum(models.Volume.size),
                        read_deleted="no",
                        session=session)
    row = query.filter_by(host=host).first()

    # NOTE(vish): convert None to 0
    return (row[0] or 0, row[1] or 0)


@require_admin_context
def volume_data_get_for_project(context, project_id, session=None):
    """Return (volume count, total GiB) for one project."""
    query = model_query(context,
                        func.count(models.Volume.id),
                        func.sum(models.Volume.size),
                        read_deleted="no",
                        session=session)
    row = query.filter_by(project_id=project_id).first()

    # NOTE(vish): convert None to 0
    return (row[0] or 0, row[1] or 0)


@require_admin_context
def volume_destroy(context, volume_id):
    """Soft-delete a volume, free its iSCSI target, delete its metadata."""
    session = get_session()
    with session.begin():
        session.query(models.Volume).\
            filter_by(id=volume_id).\
            update({'status': 'deleted',
                    'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
        session.query(models.IscsiTarget).\
            filter_by(volume_id=volume_id).\
            update({'volume_id': None})
        session.query(models.VolumeMetadata).\
            filter_by(volume_id=volume_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_detached(context, volume_id):
    """Mark a volume as detached from its instance."""
    session = get_session()
    with session.begin():
        volume = volume_get(context, volume_id, session=session)
        volume['status'] = 'available'
        volume['mountpoint'] = None
        volume['attach_status'] = 'detached'
        volume['instance_uuid'] = None
        volume.save(session=session)


@require_context
def _volume_get_query(context, session=None, project_only=False):
    """Base volume query with metadata and volume type eagerly loaded."""
    query = model_query(context, models.Volume, session=session,
                        project_only=project_only)
    query = query.options(joinedload('volume_metadata'))
    return query.options(joinedload('volume_type'))


@require_context
def volume_get(context, volume_id, session=None):
    """Return a single volume by id or raise VolumeNotFound."""
    query = _volume_get_query(context, session=session, project_only=True)
    volume = query.filter_by(id=volume_id).first()

    if not volume:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return volume


@require_admin_context
def volume_get_all(context, marker, limit, sort_key, sort_dir):
    """Return all volumes, paginated."""
    query = _volume_get_query(context)

    marker_volume = volume_get(context, marker) if marker is not None else None

    query = sqlalchemyutils.paginate_query(query, models.Volume, limit,
                                           [sort_key, 'created_at', 'id'],
                                           marker=marker_volume,
                                           sort_dir=sort_dir)
    return query.all()


@require_admin_context
def volume_get_all_by_host(context, host):
    """Return all volumes on one host."""
    return _volume_get_query(context).filter_by(host=host).all()


@require_admin_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
    """Return all volumes attached to one instance ([] when none)."""
    rows = model_query(context, models.Volume, read_deleted="no").\
        options(joinedload('volume_metadata')).\
        options(joinedload('volume_type')).\
        filter_by(instance_uuid=instance_uuid).\
        all()

    return rows or []


@require_context
def volume_get_all_by_project(context, project_id, marker, limit, sort_key,
                              sort_dir):
    """Return one project's volumes, paginated."""
    authorize_project_context(context, project_id)
    query = _volume_get_query(context).filter_by(project_id=project_id)

    marker_volume = volume_get(context, marker) if marker is not None else None

    query = sqlalchemyutils.paginate_query(query, models.Volume, limit,
                                           [sort_key, 'created_at', 'id'],
                                           marker=marker_volume,
                                           sort_dir=sort_dir)
    return query.all()
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
    """Return the iSCSI target number assigned to a volume."""
    row = model_query(context, models.IscsiTarget, read_deleted="yes").\
        filter_by(volume_id=volume_id).\
        first()

    if not row:
        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)

    return row.target_num


@require_context
def volume_update(context, volume_id, values):
    """Update a volume row; a 'metadata' key replaces its metadata set."""
    session = get_session()
    if values.get('metadata') is not None:
        volume_metadata_update(context,
                               volume_id,
                               values.pop('metadata'),
                               delete=True)
    with session.begin():
        volume = volume_get(context, volume_id, session=session)
        volume.update(values)
        volume.save(session=session)
        return volume


####################

def _volume_metadata_get_query(context, volume_id, session=None):
    """Base query over one volume's live metadata rows."""
    query = model_query(context, models.VolumeMetadata,
                        session=session, read_deleted="no")
    return query.filter_by(volume_id=volume_id)


@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
    """Return a volume's metadata as a plain {key: value} dict."""
    rows = _volume_metadata_get_query(context, volume_id).all()
    return dict((row['key'], row['value']) for row in rows)


@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
    """Soft-delete one metadata key on a volume."""
    _volume_metadata_get_query(context, volume_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
    """Return one metadata row or raise VolumeMetadataNotFound."""
    query = _volume_metadata_get_query(context, volume_id, session=session)
    row = query.filter_by(key=key).first()

    if not row:
        raise exception.VolumeMetadataNotFound(metadata_key=key,
                                               volume_id=volume_id)
    return row
@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
    """Upsert a volume's metadata; delete=True removes keys not in *metadata*.

    Returns the *metadata* dict that was passed in.
    """
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = volume_metadata_get(context, volume_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = volume_metadata_get_item(context, volume_id,
                                                    meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta
    # objects
    for meta_key, meta_value in metadata.items():
        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = volume_metadata_get_item(context, volume_id,
                                                meta_key, session)
        except exception.VolumeMetadataNotFound:
            meta_ref = models.VolumeMetadata()
            item.update({"key": meta_key, "volume_id": volume_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata


###################


@require_context
def snapshot_create(context, values):
    """Create a snapshot row (plus its metadata rows) and return it."""
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    values['snapshot_metadata'] = _metadata_refs(values.get('metadata'),
                                                 models.SnapshotMetadata)

    snapshot = models.Snapshot()
    snapshot.update(values)

    session = get_session()
    with session.begin():
        snapshot.save(session=session)

    return snapshot_get(context, values['id'], session=session)


@require_admin_context
def snapshot_destroy(context, snapshot_id):
    """Soft-delete a snapshot."""
    session = get_session()
    with session.begin():
        session.query(models.Snapshot).\
            filter_by(id=snapshot_id).\
            update({'status': 'deleted',
                    'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
@require_context
def snapshot_get(context, snapshot_id, session=None):
    """Return a single snapshot by id or raise SnapshotNotFound."""
    query = model_query(context, models.Snapshot, session=session,
                        project_only=True)
    snapshot = query.filter_by(id=snapshot_id).first()

    if not snapshot:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return snapshot


@require_admin_context
def snapshot_get_all(context):
    """Return every snapshot."""
    return model_query(context, models.Snapshot).all()


@require_context
def snapshot_get_all_for_volume(context, volume_id):
    """Return the live snapshots of one volume."""
    query = model_query(context, models.Snapshot, read_deleted='no',
                        project_only=True)
    return query.filter_by(volume_id=volume_id).all()


@require_context
def snapshot_get_all_by_project(context, project_id):
    """Return one project's snapshots."""
    authorize_project_context(context, project_id)
    query = model_query(context, models.Snapshot)
    return query.filter_by(project_id=project_id).all()


@require_context
def snapshot_data_get_for_project(context, project_id, session=None):
    """Return (snapshot count, total GiB) for one project."""
    authorize_project_context(context, project_id)
    query = model_query(context,
                        func.count(models.Snapshot.id),
                        func.sum(models.Snapshot.volume_size),
                        read_deleted="no",
                        session=session)
    row = query.filter_by(project_id=project_id).first()

    # NOTE(vish): convert None to 0
    return (row[0] or 0, row[1] or 0)


@require_context
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
    """Return snapshots that were active during window."""
    session = get_session()
    query = session.query(models.Snapshot)

    # NOTE: "== None" is required -- SQLAlchemy translates it to IS NULL.
    query = query.filter(or_(models.Snapshot.deleted_at == None,
                             models.Snapshot.deleted_at > begin))
    if end:
        query = query.filter(models.Snapshot.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)

    return query.all()


@require_context
def snapshot_update(context, snapshot_id, values):
    """Apply *values* to an existing snapshot row."""
    session = get_session()
    with session.begin():
        snapshot = snapshot_get(context, snapshot_id, session=session)
        snapshot.update(values)
        snapshot.save(session=session)
####################


def _snapshot_metadata_get_query(context, snapshot_id, session=None):
    """Base query over one snapshot's live metadata rows."""
    query = model_query(context, models.SnapshotMetadata,
                        session=session, read_deleted="no")
    return query.filter_by(snapshot_id=snapshot_id)


@require_context
@require_snapshot_exists
def snapshot_metadata_get(context, snapshot_id):
    """Return a snapshot's metadata as a plain {key: value} dict."""
    rows = _snapshot_metadata_get_query(context, snapshot_id).all()
    return dict((row['key'], row['value']) for row in rows)


@require_context
@require_snapshot_exists
def snapshot_metadata_delete(context, snapshot_id, key):
    """Soft-delete one metadata key on a snapshot."""
    _snapshot_metadata_get_query(context, snapshot_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
@require_snapshot_exists
def snapshot_metadata_get_item(context, snapshot_id, key, session=None):
    """Return one metadata row or raise SnapshotMetadataNotFound."""
    query = _snapshot_metadata_get_query(context, snapshot_id,
                                         session=session)
    row = query.filter_by(key=key).first()

    if not row:
        raise exception.SnapshotMetadataNotFound(metadata_key=key,
                                                 snapshot_id=snapshot_id)
    return row


@require_context
@require_snapshot_exists
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
    """Upsert a snapshot's metadata; delete=True removes absent keys.

    Returns the *metadata* dict that was passed in.
    """
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = snapshot_metadata_get(context, snapshot_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = snapshot_metadata_get_item(context, snapshot_id,
                                                      meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta
    # objects
    for meta_key, meta_value in metadata.items():
        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = snapshot_metadata_get_item(context, snapshot_id,
                                                  meta_key, session)
        except exception.SnapshotMetadataNotFound:
            meta_ref = models.SnapshotMetadata()
            item.update({"key": meta_key, "snapshot_id": snapshot_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
###################


@require_admin_context
def migration_create(context, values):
    """Insert a migration row and return it."""
    migration = models.Migration()
    migration.update(values)
    migration.save()
    return migration


@require_admin_context
def migration_update(context, id, values):
    """Apply *values* to an existing migration row and return it."""
    session = get_session()
    with session.begin():
        migration = migration_get(context, id, session=session)
        migration.update(values)
        migration.save(session=session)
        return migration


@require_admin_context
def migration_get(context, id, session=None):
    """Return one migration (including deleted) or raise MigrationNotFound."""
    result = model_query(context, models.Migration, session=session,
                         read_deleted="yes").\
        filter_by(id=id).\
        first()

    if not result:
        raise exception.MigrationNotFound(migration_id=id)

    return result


@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Return the migration for an instance with the given status."""
    result = model_query(context, models.Migration, read_deleted="yes").\
        filter_by(instance_uuid=instance_uuid).\
        filter_by(status=status).\
        first()

    if not result:
        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
                                                  status=status)

    return result


@require_admin_context
def migration_get_all_unconfirmed(context, confirm_window, session=None):
    """Return 'finished' migrations not updated within confirm_window secs."""
    # Use a distinct name rather than rebinding the parameter.
    cutoff = timeutils.utcnow() - datetime.timedelta(seconds=confirm_window)

    return model_query(context, models.Migration, session=session,
                       read_deleted="yes").\
        filter(models.Migration.updated_at <= cutoff).\
        filter_by(status="finished").\
        all()


##################


@require_admin_context
def volume_type_create(context, values):
    """Create a new volume type.

    (Docstring fixed: it previously said "instance type".)  In order to
    pass in extra specs, the values dict should contain a 'extra_specs'
    key/value pair:

    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    session = get_session()
    with session.begin():
        try:
            volume_type_get_by_name(context, values['name'], session)
            raise exception.VolumeTypeExists(id=values['name'])
        except exception.VolumeTypeNotFoundByName:
            pass
        try:
            volume_type_get(context, values['id'], session)
            raise exception.VolumeTypeExists(id=values['id'])
        except exception.VolumeTypeNotFound:
            pass
        try:
            values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
                                                   models.VolumeTypeExtraSpecs)
            volume_type_ref = models.VolumeTypes()
            volume_type_ref.update(values)
            volume_type_ref.save()
        except Exception as e:
            # NOTE: was `except Exception, e` -- Python-2-only comma
            # syntax; `as` form works on py2.6+ and py3 and matches the
            # rest of the file.
            raise exception.DBError(e)
        return volume_type_ref


@require_context
def volume_type_get_all(context, inactive=False, filters=None):
    """
    Returns a dict describing all volume_types with name as key.
    """
    filters = filters or {}

    read_deleted = "yes" if inactive else "no"
    rows = model_query(context, models.VolumeTypes,
                       read_deleted=read_deleted).\
        options(joinedload('extra_specs')).\
        order_by("name").\
        all()

    # TODO(sirp): this patern of converting rows to a result with extra_specs
    # is repeated quite a bit, might be worth creating a method for it
    result = {}
    for row in rows:
        result[row['name']] = _dict_with_extra_specs(row)

    return result


@require_context
def volume_type_get(context, id, session=None):
    """Returns a dict describing specific volume_type"""
    result = model_query(context, models.VolumeTypes, session=session).\
        options(joinedload('extra_specs')).\
        filter_by(id=id).\
        first()

    if not result:
        raise exception.VolumeTypeNotFound(volume_type_id=id)

    return _dict_with_extra_specs(result)


@require_context
def volume_type_get_by_name(context, name, session=None):
    """Returns a dict describing specific volume_type"""
    result = model_query(context, models.VolumeTypes, session=session).\
        options(joinedload('extra_specs')).\
        filter_by(name=name).\
        first()

    if not result:
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)

    return _dict_with_extra_specs(result)


@require_admin_context
def volume_type_destroy(context, id):
    """Soft-delete a volume type and its extra specs."""
    # Raises VolumeTypeNotFound for unknown ids before touching anything.
    volume_type_get(context, id)

    session = get_session()
    with session.begin():
        session.query(models.VolumeTypes).\
            filter_by(id=id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
        session.query(models.VolumeTypeExtraSpecs).\
            filter_by(volume_type_id=id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def volume_get_active_by_window(context,
                                begin,
                                end=None,
                                project_id=None):
    """Return volumes that were active during window."""
    session = get_session()
    query = session.query(models.Volume)

    # NOTE: "== None" is required -- SQLAlchemy translates it to IS NULL.
    query = query.filter(or_(models.Volume.deleted_at == None,
                             models.Volume.deleted_at > begin))
    if end:
        query = query.filter(models.Volume.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)

    return query.all()
####################


def _volume_type_extra_specs_query(context, volume_type_id, session=None):
    """Base query over one volume type's live extra-spec rows."""
    return model_query(context, models.VolumeTypeExtraSpecs, session=session,
                       read_deleted="no").\
        filter_by(volume_type_id=volume_type_id)


@require_context
def volume_type_extra_specs_get(context, volume_type_id):
    """Return a volume type's extra specs as a plain {key: value} dict."""
    rows = _volume_type_extra_specs_query(context, volume_type_id).\
        all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Soft-delete one extra-spec key on a volume type."""
    _volume_type_extra_specs_query(context, volume_type_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
def volume_type_extra_specs_get_item(context, volume_type_id, key,
                                     session=None):
    """Return one extra-spec row or raise VolumeTypeExtraSpecsNotFound."""
    result = _volume_type_extra_specs_query(
        context, volume_type_id, session=session).\
        filter_by(key=key).\
        first()

    if not result:
        raise exception.VolumeTypeExtraSpecsNotFound(
            extra_specs_key=key,
            volume_type_id=volume_type_id)

    return result


@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
                                             specs):
    """Upsert each key/value in *specs* and return *specs*."""
    session = get_session()
    spec_ref = None
    for key, value in specs.iteritems():
        try:
            spec_ref = volume_type_extra_specs_get_item(
                context, volume_type_id, key, session)
        except exception.VolumeTypeExtraSpecsNotFound:
            # NOTE: was `except ..., e` -- Python-2-only comma syntax
            # with an unused binding; dropped for py3 compatibility.
            spec_ref = models.VolumeTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "volume_type_id": volume_type_id,
                         "deleted": False})
        spec_ref.save(session=session)
    return specs


####################


@require_context
@require_volume_exists
def volume_glance_metadata_get(context, volume_id, session=None):
    """Return the Glance metadata for the specified volume."""
    if not session:
        session = get_session()

    query = session.query(models.VolumeGlanceMetadata)
    query = query.filter_by(volume_id=volume_id)
    return query.filter_by(deleted=False).all()


@require_context
@require_snapshot_exists
def volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
    """Return the Glance metadata for the specified snapshot."""
    if not session:
        session = get_session()

    query = session.query(models.VolumeGlanceMetadata)
    query = query.filter_by(snapshot_id=snapshot_id)
    return query.filter_by(deleted=False).all()


@require_context
@require_volume_exists
def volume_glance_metadata_create(context, volume_id, key, value,
                                  session=None):
    """Add one new key:value pair of Glance metadata to a volume.

    Changing the value of an existing key is not supported and raises
    GlanceMetadataExists.
    """
    if session is None:
        session = get_session()

    with session.begin():
        existing = session.query(models.VolumeGlanceMetadata).\
            filter_by(volume_id=volume_id).\
            filter_by(key=key).\
            filter_by(deleted=False).all()

        if len(existing) > 0:
            raise exception.GlanceMetadataExists(key=key,
                                                 volume_id=volume_id)

        meta = models.VolumeGlanceMetadata()
        meta.volume_id = volume_id
        meta.key = key
        meta.value = value

        meta.save(session=session)

    return


@require_context
@require_snapshot_exists
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id,
                                            session=None):
    """Copy a volume's Glance metadata onto a snapshot.

    Ensures a volume later created from the snapshot retains the
    original metadata.
    """
    if session is None:
        session = get_session()

    source = volume_glance_metadata_get(context, volume_id, session=session)
    with session.begin():
        for item in source:
            meta = models.VolumeGlanceMetadata()
            meta.snapshot_id = snapshot_id
            meta.key = item['key']
            meta.value = item['value']

            meta.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_from_volume_to_volume(context,
                                                      src_volume_id,
                                                      volume_id,
                                                      session=None):
    """Copy Glance metadata from one volume onto another (a clone).

    Ensures the clone retains the source volume's metadata.
    """
    if session is None:
        session = get_session()

    source = volume_glance_metadata_get(context,
                                        src_volume_id,
                                        session=session)
    with session.begin():
        for item in source:
            meta = models.VolumeGlanceMetadata()
            meta.volume_id = volume_id
            meta.key = item['key']
            meta.value = item['value']

            meta.save(session=session)


@require_context
@require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id,
                                          session=None):
    """Copy a snapshot's Glance metadata onto a volume created from it.

    Ensures the Glance metadata from the original volume is retained.
    """
    if session is None:
        session = get_session()

    source = volume_snapshot_glance_metadata_get(context, snapshot_id,
                                                 session=session)
    with session.begin():
        for item in source:
            meta = models.VolumeGlanceMetadata()
            meta.volume_id = volume_id
            meta.key = item['key']
            meta.value = item['value']

            meta.save(session=session)
@require_context
def volume_glance_metadata_delete_by_volume(context, volume_id):
    """Soft-delete all Glance metadata rows attached to a volume."""
    session = get_session()
    session.query(models.VolumeGlanceMetadata).\
        filter_by(volume_id=volume_id).\
        filter_by(deleted=False).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
    """Soft-delete all Glance metadata rows attached to a snapshot."""
    session = get_session()
    session.query(models.VolumeGlanceMetadata).\
        filter_by(snapshot_id=snapshot_id).\
        filter_by(deleted=False).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})


####################


@require_admin_context
def sm_backend_conf_create(context, values):
    """Create a storage-manager backend config row from *values*."""
    backend_conf = models.SMBackendConf()
    backend_conf.update(values)
    backend_conf.save()
    return backend_conf


@require_admin_context
def sm_backend_conf_update(context, sm_backend_id, values):
    """Update a backend config row.

    Raises NotFound when no row (deleted or not) has the given id.
    """
    session = get_session()
    with session.begin():
        backend_conf = model_query(context, models.SMBackendConf,
                                   session=session,
                                   read_deleted="yes").\
            filter_by(id=sm_backend_id).\
            first()

        if not backend_conf:
            # Explicit mapping instead of locals() keeps the dependency
            # on the variable name obvious.
            raise exception.NotFound(
                _("No backend config with id %(sm_backend_id)s") %
                {'sm_backend_id': sm_backend_id})

        backend_conf.update(values)
        backend_conf.save(session=session)
    return backend_conf


@require_admin_context
def sm_backend_conf_delete(context, sm_backend_id):
    # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
    # `purge` actually deleting the record?
    session = get_session()
    with session.begin():
        model_query(context, models.SMBackendConf, session=session,
                    read_deleted="yes").\
            filter_by(id=sm_backend_id).\
            delete()


@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
    """Return the backend config row with the given id or raise NotFound."""
    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
        filter_by(id=sm_backend_id).\
        first()

    if not result:
        raise exception.NotFound(_("No backend config with id "
                                   "%(sm_backend_id)s") %
                                 {'sm_backend_id': sm_backend_id})

    return result


@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
    """Return the backend config for a storage repository, or None."""
    return model_query(context, models.SMBackendConf, read_deleted="yes").\
        filter_by(sr_uuid=sr_uuid).\
        first()


@require_admin_context
def sm_backend_conf_get_all(context):
    """Return every backend config row, including soft-deleted ones."""
    return model_query(context, models.SMBackendConf, read_deleted="yes").\
        all()


####################


def _sm_flavor_get_query(context, sm_flavor_label, session=None):
    """Base query for SM flavors filtered by label (includes deleted)."""
    return model_query(context, models.SMFlavors, session=session,
                       read_deleted="yes").\
        filter_by(label=sm_flavor_label)


@require_admin_context
def sm_flavor_create(context, values):
    """Create an SM flavor row from *values*."""
    sm_flavor = models.SMFlavors()
    sm_flavor.update(values)
    sm_flavor.save()
    return sm_flavor


@require_admin_context
def sm_flavor_update(context, sm_flavor_label, values):
    """Update the SM flavor identified by its label."""
    sm_flavor = sm_flavor_get(context, sm_flavor_label)
    sm_flavor.update(values)
    sm_flavor.save()
    return sm_flavor


@require_admin_context
def sm_flavor_delete(context, sm_flavor_label):
    """Hard-delete the SM flavor identified by its label."""
    session = get_session()
    with session.begin():
        _sm_flavor_get_query(context, sm_flavor_label).delete()


@require_admin_context
def sm_flavor_get(context, sm_flavor_label):
    """Return the SM flavor with the given label or raise NotFound."""
    result = _sm_flavor_get_query(context, sm_flavor_label).first()

    if not result:
        # BUG FIX: this used `% locals()`, but no local is named
        # 'sm_flavor', so the not-found path raised KeyError instead of
        # NotFound. Interpolate explicitly.
        raise exception.NotFound(
            _("No sm_flavor called %(sm_flavor)s") %
            {'sm_flavor': sm_flavor_label})

    return result


@require_admin_context
def sm_flavor_get_all(context):
    """Return every SM flavor row, including soft-deleted ones."""
    return model_query(context, models.SMFlavors, read_deleted="yes").all()
###############


def _sm_volume_get_query(context, volume_id, session=None):
    """Base query for SM volumes filtered by id (includes deleted)."""
    return model_query(context, models.SMVolume, session=session,
                       read_deleted="yes").\
        filter_by(id=volume_id)


def sm_volume_create(context, values):
    """Create an SM volume row from *values*."""
    sm_volume = models.SMVolume()
    sm_volume.update(values)
    sm_volume.save()
    return sm_volume


def sm_volume_update(context, volume_id, values):
    """Update the SM volume with the given id."""
    sm_volume = sm_volume_get(context, volume_id)
    sm_volume.update(values)
    sm_volume.save()
    return sm_volume


def sm_volume_delete(context, volume_id):
    """Hard-delete the SM volume with the given id."""
    session = get_session()
    with session.begin():
        _sm_volume_get_query(context, volume_id, session=session).delete()


def sm_volume_get(context, volume_id):
    """Return the SM volume with the given id or raise NotFound."""
    result = _sm_volume_get_query(context, volume_id).first()

    if not result:
        raise exception.NotFound(
            _("No sm_volume with id %(volume_id)s") %
            {'volume_id': volume_id})

    return result


def sm_volume_get_all(context):
    """Return every SM volume row, including soft-deleted ones."""
    return model_query(context, models.SMVolume, read_deleted="yes").all()


###############


@require_context
def backup_get(context, backup_id, session=None):
    """Return the backup with the given id or raise BackupNotFound."""
    result = model_query(context, models.Backup,
                         session=session, project_only=True).\
        filter_by(id=backup_id).\
        first()

    if not result:
        raise exception.BackupNotFound(backup_id=backup_id)

    return result


@require_admin_context
def backup_get_all(context):
    """Return all non-deleted backups."""
    return model_query(context, models.Backup).all()


@require_admin_context
def backup_get_all_by_host(context, host):
    """Return all non-deleted backups on the given host."""
    return model_query(context, models.Backup).filter_by(host=host).all()


@require_context
def backup_get_all_by_project(context, project_id):
    """Return all non-deleted backups owned by the given project."""
    authorize_project_context(context, project_id)

    return model_query(context, models.Backup).\
        filter_by(project_id=project_id).all()


@require_context
def backup_create(context, values):
    """Create a backup row, generating a UUID id when none is supplied."""
    backup = models.Backup()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    backup.update(values)
    backup.save()
    return backup


@require_context
def backup_update(context, backup_id, values):
    """Update the backup with the given id.

    Raises BackupNotFound when no row (deleted or not) has the given id.
    """
    session = get_session()
    with session.begin():
        backup = model_query(context, models.Backup,
                             session=session, read_deleted="yes").\
            filter_by(id=backup_id).first()

        if not backup:
            # Consistency fix: construct BackupNotFound with its
            # backup_id kwarg, matching backup_get above, instead of a
            # pre-formatted positional message.
            raise exception.BackupNotFound(backup_id=backup_id)

        backup.update(values)
        backup.save(session=session)
    return backup


@require_admin_context
def backup_destroy(context, backup_id):
    """Soft-delete a backup and mark its status 'deleted'."""
    session = get_session()
    with session.begin():
        session.query(models.Backup).\
            filter_by(id=backup_id).\
            update({'status': 'deleted',
                    'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


################


def _share_get_query(context, session=None):
    """Base query for non-deleted shares."""
    if session is None:
        session = get_session()
    return model_query(context, models.Share, session=session)


@require_context
def share_create(context, values):
    """Create a share row, generating a UUID id when none is supplied."""
    share_ref = models.Share()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    share_ref.update(values)
    session = get_session()
    with session.begin():
        share_ref.save(session=session)

    return share_ref


@require_context
def share_update(context, share_id, values):
    """Update the share with the given id (NotFound if missing)."""
    session = get_session()
    with session.begin():
        share_ref = share_get(context, share_id, session=session)
        share_ref.update(values)
        share_ref.save(session=session)
    return share_ref


@require_context
def share_get(context, share_id, session=None):
    """Return the share with the given id or raise NotFound."""
    result = _share_get_query(context, session).filter_by(id=share_id).first()
    if result is None:
        raise exception.NotFound()
    return result


@require_admin_context
def share_get_all(context):
    """Return all non-deleted shares."""
    return _share_get_query(context).all()


@require_admin_context
def share_get_all_by_host(context, host):
    """Return all non-deleted shares on the given host."""
    query = _share_get_query(context)
    return query.filter_by(host=host).all()


@require_context
def share_get_all_by_project(context, project_id):
    """Returns list of shares with given project ID."""
    return _share_get_query(context).filter_by(project_id=project_id).all()
@require_context
def share_delete(context, share_id):
    """Soft-delete a share and mark its status 'deleted'."""
    session = get_session()
    share_ref = share_get(context, share_id, session=session)
    share_ref.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at'),
                      'status': 'deleted'})
    share_ref.save(session=session)


###################


def _share_access_get_query(context, session, values):
    """Base query for share access mappings matching *values*."""
    query = model_query(context, models.ShareAccessMapping, session=session)
    return query.filter_by(**values)


@require_context
def share_access_create(context, values):
    """Create an access mapping row, generating a UUID id when missing."""
    session = get_session()
    with session.begin():
        access_ref = models.ShareAccessMapping()
        if not values.get('id'):
            values['id'] = str(uuid.uuid4())
        access_ref.update(values)
        access_ref.save(session=session)
    return access_ref


@require_context
def share_access_get(context, access_id):
    """Return the access mapping with the given id or raise NotFound."""
    session = get_session()
    access = _share_access_get_query(context, session,
                                     {'id': access_id}).first()
    if access is None:
        raise exception.NotFound()
    return access


@require_context
def share_access_get_all_for_share(context, share_id):
    """Return every access mapping attached to the given share."""
    session = get_session()
    return _share_access_get_query(context, session,
                                   {'share_id': share_id}).all()


@require_context
def share_access_delete(context, access_id):
    """Soft-delete an access mapping and move it to the DELETED state."""
    session = get_session()
    with session.begin():
        session.query(models.ShareAccessMapping).\
            filter_by(id=access_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at'),
                    'state': models.ShareAccessMapping.STATE_DELETED})


@require_context
def share_access_update(context, access_id, values):
    """Update the access mapping with the given id (must exist)."""
    session = get_session()
    with session.begin():
        access = _share_access_get_query(context, session,
                                         {'id': access_id}).one()
        access.update(values)
        access.save(session=session)
    return access


###################


@require_context
def share_snapshot_create(context, values):
    """Create a share snapshot row, generating a UUID id when missing."""
    snapshot_ref = models.ShareSnapshot()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    snapshot_ref.update(values)

    session = get_session()
    with session.begin():
        snapshot_ref.save(session=session)

    return share_snapshot_get(context, values['id'], session=session)


@require_admin_context
def share_snapshot_destroy(context, snapshot_id):
    """Soft-delete a share snapshot and mark its status 'deleted'."""
    session = get_session()
    with session.begin():
        session.query(models.ShareSnapshot).\
            filter_by(id=snapshot_id).\
            update({'status': 'deleted',
                    'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def share_snapshot_get(context, snapshot_id, session=None):
    """Return the snapshot with the given id or raise ShareSnapshotNotFound."""
    result = model_query(context, models.ShareSnapshot, session=session,
                         project_only=True).\
        filter_by(id=snapshot_id).\
        first()

    if not result:
        raise exception.ShareSnapshotNotFound(snapshot_id=snapshot_id)

    return result
@require_admin_context
def share_snapshot_get_all(context):
    """Return all non-deleted share snapshots."""
    return model_query(context, models.ShareSnapshot).all()


@require_context
def share_snapshot_get_all_by_project(context, project_id):
    """Return all non-deleted snapshots owned by the given project."""
    authorize_project_context(context, project_id)
    query = model_query(context, models.ShareSnapshot)
    return query.filter_by(project_id=project_id).all()


@require_context
def share_snapshot_get_all_for_share(context, share_id):
    """Return all non-deleted snapshots of the given share."""
    query = model_query(context, models.ShareSnapshot, read_deleted='no',
                        project_only=True)
    return query.filter_by(share_id=share_id).all()


@require_context
def share_snapshot_data_get_for_project(context, project_id, session=None):
    """Return (snapshot count, total snapshot size) for a project."""
    authorize_project_context(context, project_id)
    row = model_query(context,
                      func.count(models.ShareSnapshot.id),
                      func.sum(models.ShareSnapshot.share_size),
                      read_deleted="no",
                      session=session).\
        filter_by(project_id=project_id).\
        first()

    # NOTE(vish): convert None to 0
    count = row[0] or 0
    total_size = row[1] or 0
    return (count, total_size)


@require_context
def share_snapshot_update(context, snapshot_id, values):
    """Update the snapshot with the given id (must exist)."""
    session = get_session()
    with session.begin():
        snapshot_ref = share_snapshot_get(context, snapshot_id,
                                          session=session)
        snapshot_ref.update(values)
        snapshot_ref.save(session=session)
+ +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/cinder/db/sqlalchemy/migrate_repo/__init__.py b/cinder/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/db/sqlalchemy/migrate_repo/manage.py b/cinder/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 0000000000..09e340f44f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +from migrate.versioning.shell import main +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/cinder/db/sqlalchemy/migrate_repo/migrate.cfg b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 0000000000..10c685c0e5 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=cinder + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. 
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String, Table

from cinder import flags
from cinder.openstack.common import log as logging

FLAGS = flags.FLAGS

LOG = logging.getLogger(__name__)


def _audit_columns():
    """Return fresh created/updated/deleted bookkeeping columns.

    A new list is built on every call because a Column object may only
    be attached to a single Table.
    """
    return [Column('created_at', DateTime),
            Column('updated_at', DateTime),
            Column('deleted_at', DateTime),
            Column('deleted', Boolean)]


def _define_table(name, meta, columns):
    """Declare an InnoDB table with the standard audit columns prepended."""
    return Table(name, meta, *(_audit_columns() + columns),
                 mysql_engine='InnoDB')


def upgrade(migrate_engine):
    """Create the initial cinder schema."""
    meta = MetaData()
    meta.bind = migrate_engine

    migrations = _define_table('migrations', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('source_compute', String(length=255)),
        Column('dest_compute', String(length=255)),
        Column('dest_host', String(length=255)),
        Column('status', String(length=255)),
        Column('instance_uuid', String(length=255)),
        Column('old_instance_type_id', Integer),
        Column('new_instance_type_id', Integer),
    ])

    services = _define_table('services', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('host', String(length=255)),
        Column('binary', String(length=255)),
        Column('topic', String(length=255)),
        Column('report_count', Integer, nullable=False),
        Column('disabled', Boolean),
        Column('availability_zone', String(length=255)),
    ])

    sm_flavors = _define_table('sm_flavors', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('label', String(length=255)),
        Column('description', String(length=255)),
    ])

    sm_backend_config = _define_table('sm_backend_config', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('flavor_id', Integer, ForeignKey('sm_flavors.id'),
               nullable=False),
        Column('sr_uuid', String(length=255)),
        Column('sr_type', String(length=255)),
        Column('config_params', String(length=2047)),
    ])

    sm_volume = _define_table('sm_volume', meta, [
        Column('id', String(length=36),
               ForeignKey('volumes.id'),
               primary_key=True,
               nullable=False),
        Column('backend_id', Integer, ForeignKey('sm_backend_config.id'),
               nullable=False),
        Column('vdi_uuid', String(length=255)),
    ])

    snapshots = _define_table('snapshots', meta, [
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('volume_id', String(length=36), nullable=False),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('status', String(length=255)),
        Column('progress', String(length=255)),
        Column('volume_size', Integer),
        Column('scheduled_at', DateTime),
        Column('display_name', String(length=255)),
        Column('display_description', String(length=255)),
    ])

    volume_types = _define_table('volume_types', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('name', String(length=255)),
    ])

    volume_metadata = _define_table('volume_metadata', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('volume_id', String(length=36), ForeignKey('volumes.id'),
               nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
    ])

    volume_type_extra_specs = _define_table('volume_type_extra_specs', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('volume_type_id', Integer, ForeignKey('volume_types.id'),
               nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
    ])

    volumes = _define_table('volumes', meta, [
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('ec2_id', String(length=255)),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('host', String(length=255)),
        Column('size', Integer),
        Column('availability_zone', String(length=255)),
        Column('instance_uuid', String(length=36)),
        Column('mountpoint', String(length=255)),
        Column('attach_time', String(length=255)),
        Column('status', String(length=255)),
        Column('attach_status', String(length=255)),
        Column('scheduled_at', DateTime),
        Column('launched_at', DateTime),
        Column('terminated_at', DateTime),
        Column('display_name', String(length=255)),
        Column('display_description', String(length=255)),
        Column('provider_location', String(length=256)),
        Column('provider_auth', String(length=256)),
        Column('snapshot_id', String(length=36)),
        Column('volume_type_id', Integer),
    ])

    # quotas puts its id column first, so it is declared explicitly.
    quotas = Table(
        'quotas', meta,
        Column('id', Integer, primary_key=True, nullable=False),
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('project_id', String(length=255)),
        Column('resource', String(length=255), nullable=False),
        Column('hard_limit', Integer),
        mysql_engine='InnoDB'
    )

    iscsi_targets = _define_table('iscsi_targets', meta, [
        Column('id', Integer, primary_key=True, nullable=False),
        Column('target_num', Integer),
        Column('host', String(length=255)),
        Column('volume_id', String(length=36), ForeignKey('volumes.id'),
               nullable=True),
    ])

    # create all tables
    # Take care on create order for those with FK dependencies
    tables = [sm_flavors,
              sm_backend_config,
              snapshots,
              volume_types,
              volumes,
              iscsi_targets,
              migrations,
              quotas,
              services,
              sm_volume,
              volume_metadata,
              volume_type_extra_specs]

    for table in tables:
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            LOG.exception(_('Exception while creating table.'))
            raise

    if migrate_engine.name == "mysql":
        table_names = ["sm_flavors",
                       "sm_backend_config",
                       "snapshots",
                       "volume_types",
                       "volumes",
                       "iscsi_targets",
                       "migrate_version",
                       "migrations",
                       "quotas",
                       "services",
                       "sm_volume",
                       "volume_metadata",
                       "volume_type_extra_specs"]

        sql = "SET foreign_key_checks = 0;"
        for name in table_names:
            sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % name
            # BUG FIX: the Engine=InnoDB conversion used to sit outside
            # this loop and therefore only applied to the last table.
            sql += "ALTER TABLE %s Engine=InnoDB;" % name
        sql += "SET foreign_key_checks = 1;"
        sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
            % migrate_engine.url.database
        migrate_engine.execute(sql)


def downgrade(migrate_engine):
    # BUG FIX: LOG.exception is only meaningful inside an `except` block;
    # use LOG.error for this unconditional message.
    LOG.error(_('Downgrade from initial Cinder install is unsupported.'))
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, Integer, String, Table, ForeignKey

from cinder.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def _audit_columns():
    """Return fresh bookkeeping columns (new objects on every call,
    since a Column may only belong to one Table)."""
    return [Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None))]


def _string(length):
    """Shorthand for the unicode-converting String type used below."""
    return String(length=length,
                  convert_unicode=True,
                  unicode_error=None,
                  _warn_on_bytestring=False)


def upgrade(migrate_engine):
    """Add the quota_classes, quota_usages and reservations tables."""
    meta = MetaData()
    meta.bind = migrate_engine

    quota_classes = Table(
        'quota_classes', meta,
        *(_audit_columns() + [
            Column('id', Integer(), primary_key=True),
            Column('class_name', _string(255), index=True),
            Column('resource', _string(255)),
            Column('hard_limit', Integer(), nullable=True),
        ]),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    try:
        quota_classes.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(quota_classes))
        raise

    quota_usages = Table(
        'quota_usages', meta,
        *(_audit_columns() + [
            Column('id', Integer(), primary_key=True),
            Column('project_id', _string(255), index=True),
            Column('resource', _string(255)),
            Column('in_use', Integer(), nullable=False),
            Column('reserved', Integer(), nullable=False),
            Column('until_refresh', Integer(), nullable=True),
        ]),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    try:
        quota_usages.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(quota_usages))
        raise

    reservations = Table(
        'reservations', meta,
        *(_audit_columns() + [
            Column('id', Integer(), primary_key=True),
            Column('uuid', _string(36), nullable=False),
            Column('usage_id',
                   Integer(),
                   ForeignKey('quota_usages.id'),
                   nullable=False),
            Column('project_id', _string(255), index=True),
            Column('resource', _string(255)),
            Column('delta', Integer(), nullable=False),
            Column('expire', DateTime(timezone=False)),
        ]),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    try:
        reservations.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(reservations))
        raise


def downgrade(migrate_engine):
    """Drop the quota tables in reverse dependency order.

    BUG FIX: reservations carries a FK to quota_usages.id, so it must be
    dropped before quota_usages; the original dropped quota_usages first,
    which fails on FK-enforcing backends.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    reservations = Table('reservations', meta, autoload=True)
    try:
        reservations.drop()
    except Exception:
        LOG.error(_("reservations table not dropped"))
        raise

    quota_usages = Table('quota_usages', meta, autoload=True)
    try:
        quota_usages.drop()
    except Exception:
        LOG.error(_("quota_usages table not dropped"))
        raise

    quota_classes = Table('quota_classes', meta, autoload=True)
    try:
        quota_classes.drop()
    except Exception:
        LOG.error(_("quota_classes table not dropped"))
        raise
from sqlalchemy import Column, DateTime, Text, Boolean
from sqlalchemy import MetaData, Integer, String, Table, ForeignKey

from cinder.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def upgrade(migrate_engine):
    """Add the volume_glance_metadata table."""
    meta = MetaData()
    meta.bind = migrate_engine

    # Just for the ForeignKey and column creation to succeed, these are
    # not the actual definitions of the tables.
    volumes = Table('volumes',
                    meta,
                    Column('id', Integer(),
                           primary_key=True, nullable=False),
                    mysql_engine='InnoDB')
    snapshots = Table('snapshots',
                      meta,
                      Column('id', Integer(),
                             primary_key=True, nullable=False),
                      mysql_engine='InnoDB')

    # Create new table
    volume_glance_metadata = Table(
        'volume_glance_metadata',
        meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('volume_id', String(length=36), ForeignKey('volumes.id')),
        Column('snapshot_id', String(length=36),
               ForeignKey('snapshots.id')),
        Column('key', String(255)),
        Column('value', Text),
        mysql_engine='InnoDB'
    )

    try:
        volume_glance_metadata.create()
    except Exception:
        # Typo fix: the message used to read 'volume_glance_metedata'.
        LOG.exception(_("Exception while creating table "
                        "'volume_glance_metadata'"))
        meta.drop_all(tables=[volume_glance_metadata])
        raise


def downgrade(migrate_engine):
    """Remove the volume_glance_metadata table."""
    meta = MetaData()
    meta.bind = migrate_engine

    volume_glance_metadata = Table('volume_glance_metadata',
                                   meta, autoload=True)
    try:
        volume_glance_metadata.drop()
    except Exception:
        LOG.error(_("volume_glance_metadata table not dropped"))
        raise
import uuid

from cinder.openstack.common import log as logging
from migrate import ForeignKeyConstraint
from sqlalchemy import Integer, MetaData, String, Table

LOG = logging.getLogger(__name__)


def _type_fkey_columns(volumes, volume_types, extra_specs):
    """Columns whose FK against volume_types.id must be dropped and
    recreated around the type change."""
    return [volumes.c.volume_type_id,
            volume_types.c.id,
            extra_specs.c.volume_type_id]


def _drop_type_fkeys(migrate_engine, columns, volume_types):
    """Drop each column's FK constraint; sqlite cannot, so ignore there."""
    for column in columns:
        fkeys = list(column.foreign_keys)
        if not fkeys:
            continue
        fkey_name = fkeys[0].constraint.name
        fkey = ForeignKeyConstraint(columns=[column],
                                    refcolumns=[volume_types.c.id],
                                    name=fkey_name)
        try:
            fkey.drop()
        except Exception:
            if not migrate_engine.url.get_dialect().name.startswith(
                    'sqlite'):
                raise


def _create_type_fkeys(migrate_engine, columns, volume_types):
    """Recreate the FK constraints dropped by _drop_type_fkeys."""
    for column in columns:
        fkeys = list(column.foreign_keys)
        if not fkeys:
            continue
        fkey_name = fkeys[0].constraint.name
        fkey = ForeignKeyConstraint(columns=[column],
                                    refcolumns=[volume_types.c.id],
                                    name=fkey_name)
        try:
            fkey.create()
            # Lazy %-style args instead of eager string formatting.
            LOG.info('Created foreign key %s', fkey_name)
        except Exception:
            if not migrate_engine.url.get_dialect().name.startswith(
                    'sqlite'):
                raise


def upgrade(migrate_engine):
    """Convert volume_type_id to UUID."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    volume_types = Table('volume_types', meta, autoload=True)
    extra_specs = Table('volume_type_extra_specs', meta, autoload=True)

    fkey_columns = _type_fkey_columns(volumes, volume_types, extra_specs)
    _drop_type_fkeys(migrate_engine, fkey_columns, volume_types)

    volumes.c.volume_type_id.alter(String(36))
    volume_types.c.id.alter(String(36))
    extra_specs.c.volume_type_id.alter(String(36))

    # Rewrite every existing integer type id as a fresh UUID, keeping
    # the three tables consistent.
    for t in list(volume_types.select().execute()):
        new_id = str(uuid.uuid4())

        volumes.update().\
            where(volumes.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        extra_specs.update().\
            where(extra_specs.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        volume_types.update().\
            where(volume_types.c.id == t['id']).\
            values(id=new_id).execute()

    _create_type_fkeys(migrate_engine, fkey_columns, volume_types)


def downgrade(migrate_engine):
    """Convert volume_type from UUID back to int."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    volume_types = Table('volume_types', meta, autoload=True)
    extra_specs = Table('volume_type_extra_specs', meta, autoload=True)

    fkey_columns = _type_fkey_columns(volumes, volume_types, extra_specs)
    _drop_type_fkeys(migrate_engine, fkey_columns, volume_types)

    # Renumber each type sequentially from 1, keeping the three tables
    # consistent, then narrow the columns back to Integer.
    new_id = 1
    for t in list(volume_types.select().execute()):
        volumes.update().\
            where(volumes.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        extra_specs.update().\
            where(extra_specs.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        volume_types.update().\
            where(volume_types.c.id == t['id']).\
            values(id=new_id).execute()

        new_id += 1

    volumes.c.volume_type_id.alter(Integer)
    volume_types.c.id.alter(Integer)
    extra_specs.c.volume_type_id.alter(Integer)

    _create_type_fkeys(migrate_engine, fkey_columns, volume_types)
b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.openstack.common import log as logging +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Add source volume id column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + source_volid = Column('source_volid', String(36)) + volumes.create_column(source_volid) + volumes.update().values(source_volid=None).execute() + + +def downgrade(migrate_engine): + """Remove source volume id column from volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + source_volid = Column('source_volid', String(36)) + volumes.drop_column(source_volid) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql new file mode 100644 index 0000000000..c34f31752f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql @@ -0,0 +1,124 @@ +BEGIN TRANSACTION; + +CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255),
+ project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id VARCHAR(36), + volume_type_id VARCHAR(36), + source_volid VARCHAR(36), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_uuid, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id, + volume_type_id, + source_volid + FROM volumes; + +DROP TABLE volumes; + +CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id VARCHAR(36), + volume_type_id VARCHAR(36), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_uuid, + mountpoint, + attach_time, + status, + attach_status, 
+ scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id, + volume_type_id + FROM volumes_backup; + +DROP TABLE volumes_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py b/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py new file mode 100644 index 0000000000..ddd86d3228 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + provider_location = Column('provider_location', String(255)) + snapshots.create_column(provider_location) + snapshots.update().values(provider_location=None).execute() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + provider_location = snapshots.columns.provider_location + snapshots.drop_column(provider_location) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py new file mode 100644 index 0000000000..90cd67cd3b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from sqlalchemy import MetaData, Table +from migrate.changeset.constraint import ForeignKeyConstraint + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + ForeignKeyConstraint( + columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id]).create() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + ForeignKeyConstraint( + columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id]).drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql new file mode 100644 index 0000000000..d2fe9b6930 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql @@ -0,0 +1,32 @@ +-- As sqlite does not support the DROP FOREIGN KEY, we need to create +-- the table, and move all the data to it. 
+ +BEGIN TRANSACTION; + +CREATE TABLE snapshots_v6 ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + volume_id VARCHAR(36) NOT NULL, + user_id VARCHAR(255), + project_id VARCHAR(255), + status VARCHAR(255), + progress VARCHAR(255), + volume_size INTEGER, + scheduled_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO snapshots_v6 SELECT * FROM snapshots; + +DROP TABLE snapshots; + +ALTER TABLE snapshots_v6 RENAME TO snapshots; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py new file mode 100644 index 0000000000..5dfbed0f63 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import MetaData, Integer, String, Table + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + backups = Table( + 'backups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', String(36), primary_key=True, nullable=False), + Column('volume_id', String(36), nullable=False), + Column('user_id', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('project_id', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('host', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('availability_zone', String(length=255, + convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('display_name', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('display_description', String(length=255, + convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('container', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('status', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('fail_reason', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('service_metadata', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('service', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('size', 
Integer()), + Column('object_count', Integer()), + mysql_engine='InnoDB' + ) + + try: + backups.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(backups)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + backups = Table('backups', meta, autoload=True) + try: + backups.drop() + except Exception: + LOG.error(_("backups table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py new file mode 100644 index 0000000000..3ccd79a232 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String, Table, ForeignKey + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + + # New table + snapshot_metadata = Table( + 'snapshot_metadata', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('snapshot_id', String(length=36), ForeignKey('snapshots.id'), + nullable=False), + Column('key', String(length=255)), + Column('value', String(length=255)), + mysql_engine='InnoDB' + ) + + try: + snapshot_metadata.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(snapshot_metadata)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + snapshot_metadata = Table('snapshot_metadata', + meta, + autoload=True) + try: + snapshot_metadata.drop() + except Exception: + LOG.error(_("snapshot_metadata table not dropped")) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py new file mode 100644 index 0000000000..11914c58f4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py @@ -0,0 +1,79 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, String, DateTime, Boolean +from sqlalchemy import Integer, Column, ForeignKey +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Create shares and share_access_map tables.""" + meta = MetaData() + meta.bind = migrate_engine + + shares = Table('shares', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), + primary_key=True, nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('host', String(length=255)), + Column('size', Integer), + Column('availability_zone', + String(length=255)), + Column('status', String(length=255)), + Column('scheduled_at', DateTime), + Column('launched_at', DateTime), + Column('terminated_at', DateTime), + Column('display_name', String(length=255)), + Column('display_description', + String(length=255)), + Column('snapshot_id', String(length=36)), + Column('share_proto', String(255)), + Column('export_location', String(255)), + mysql_engine='InnoDB') + + access_map = Table('share_access_map', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), + primary_key=True, nullable=False), + Column('share_id', String(36), ForeignKey('shares.id'), + nullable=False), + Column('access_type', String(255)), + Column('access_to', 
String(255)), + Column('state', String(255)), + mysql_engine='InnoDB') + + shares.create() + access_map.create() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + shares = Table('shares', meta, autoload=True) + access_map = Table('share_access_map', meta, autoload=True) + access_map.drop() + shares.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py new file mode 100644 index 0000000000..7eaff3c5c9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy.schema import Column, ForeignKey, MetaData, Table +from sqlalchemy.types import Boolean, DateTime, Integer, String + +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Create the share_snapshots table.""" + meta = MetaData() + meta.bind = migrate_engine + + shares = Table('shares', meta, autoload=True) + share_snapshots = Table( + 'share_snapshots', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), primary_key=True, nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('share_id', String(36), ForeignKey('shares.id'), + nullable=False), + Column('status', String(length=255)), + Column('progress', String(length=255)), + Column('display_name', String(length=255)), + Column('display_description', String(length=255)), + Column('share_size', Integer), + Column('share_proto', String(length=255)), + Column('export_location', String(255)), + mysql_engine='InnoDB') + + try: + share_snapshots.create() + except Exception: + LOG.error(_("Table %r not created!"), share_snapshots) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + share_snapshots = Table('share_snapshots', meta, autoload=True) + try: + share_snapshots.drop() + except Exception: + LOG.error(_("share_snapshots table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py b/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/db/sqlalchemy/migration.py b/cinder/db/sqlalchemy/migration.py new file mode 100644 index 0000000000..5365d9cdcd --- /dev/null +++ b/cinder/db/sqlalchemy/migration.py @@ -0,0 +1,118 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States
Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import distutils.version as dist_version +import os + +from cinder.db import migration +from cinder.db.sqlalchemy.session import get_engine +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging + + +import migrate +from migrate.versioning import util as migrate_util +import sqlalchemy + + +LOG = logging.getLogger(__name__) + + +@migrate_util.decorator +def patched_with_engine(f, *a, **kw): + url = a[0] + engine = migrate_util.construct_engine(url, **kw) + + try: + kw['engine'] = engine + return f(*a, **kw) + finally: + if isinstance(engine, migrate_util.Engine) and engine is not url: + migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) + engine.dispose() + + +# TODO(jkoelker) When migrate 0.7.3 is released and cinder depends +# on that version or higher, this can be removed +MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') +if (not hasattr(migrate, '__version__') or + dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): + migrate_util.with_engine = patched_with_engine + + +# NOTE(jkoelker) Delay importing migrate until we are patched +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository + +FLAGS = 
flags.FLAGS + +_REPOSITORY = None + + +def db_sync(version=None): + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.Error(_("version should be an integer")) + + current_version = db_version() + repository = _find_migrate_repo() + if version is None or version > current_version: + return versioning_api.upgrade(get_engine(), repository, version) + else: + return versioning_api.downgrade(get_engine(), repository, + version) + + +def db_version(): + repository = _find_migrate_repo() + try: + return versioning_api.db_version(get_engine(), repository) + except versioning_exceptions.DatabaseNotControlledError: + # If we aren't version controlled we may already have the database + # in the state from before we started version control, check for that + # and set up version_control appropriately + meta = sqlalchemy.MetaData() + engine = get_engine() + meta.reflect(bind=engine) + tables = meta.tables + if len(tables) == 0: + db_version_control(migration.INIT_VERSION) + return versioning_api.db_version(get_engine(), repository) + else: + raise exception.Error(_("Upgrade DB using Essex release first.")) + + +def db_version_control(version=None): + repository = _find_migrate_repo() + versioning_api.version_control(get_engine(), repository, version) + return version + + +def _find_migrate_repo(): + """Get the path for the migrate repository.""" + global _REPOSITORY + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') + assert os.path.exists(path) + if _REPOSITORY is None: + _REPOSITORY = Repository(path) + return _REPOSITORY diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py new file mode 100644 index 0000000000..9798347902 --- /dev/null +++ b/cinder/db/sqlalchemy/models.py @@ -0,0 +1,529 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. 
+# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models for cinder data. +""" + +from sqlalchemy import Column, Integer, String, Text, schema +from sqlalchemy.exc import IntegrityError +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey, DateTime, Boolean, Enum +from sqlalchemy.orm import relationship, backref, object_mapper + +from cinder.db.sqlalchemy.session import get_session + +from cinder import exception +from cinder import flags +from cinder.openstack.common import timeutils + + +FLAGS = flags.FLAGS +BASE = declarative_base() + + +class CinderBase(object): + """Base class for Cinder Models.""" + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + created_at = Column(DateTime, default=timeutils.utcnow) + updated_at = Column(DateTime, onupdate=timeutils.utcnow) + deleted_at = Column(DateTime) + deleted = Column(Boolean, default=False) + metadata = None + + def save(self, session=None): + """Save this object.""" + if not session: + session = get_session() + session.add(self) + try: + session.flush() + except IntegrityError, e: + if str(e).endswith('is not unique'): + raise exception.Duplicate(str(e)) + else: + raise + + def delete(self, session=None): + """Delete this object.""" + 
self.deleted = True + self.deleted_at = timeutils.utcnow() + self.save(session=session) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + def __iter__(self): + self._i = iter(object_mapper(self).columns) + return self + + def next(self): + n = self._i.next().name + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict.""" + for k, v in values.iteritems(): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. + + Includes attributes from joins.""" + local = dict(self) + joined = dict([(k, v) for k, v in self.__dict__.iteritems() + if not k[0] == '_']) + local.update(joined) + return local.iteritems() + + +class Service(BASE, CinderBase): + """Represents a running service on a host.""" + + __tablename__ = 'services' + id = Column(Integer, primary_key=True) + host = Column(String(255)) # , ForeignKey('hosts.id')) + binary = Column(String(255)) + topic = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + disabled = Column(Boolean, default=False) + availability_zone = Column(String(255), default='cinder') + + +class CinderNode(BASE, CinderBase): + """Represents a running cinder service on a host.""" + + __tablename__ = 'cinder_nodes' + id = Column(Integer, primary_key=True) + service_id = Column(Integer, ForeignKey('services.id'), nullable=True) + + +class Volume(BASE, CinderBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'volumes' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.volume_name_template % self.id + + ec2_id = Column(Integer) + user_id = Column(String(255)) + project_id = Column(String(255)) + + snapshot_id = Column(String(36)) + + host = Column(String(255)) # , ForeignKey('hosts.id')) + size = 
Column(Integer) + availability_zone = Column(String(255)) # TODO(vish): foreign key? + instance_uuid = Column(String(36)) + mountpoint = Column(String(255)) + attach_time = Column(String(255)) # TODO(vish): datetime + status = Column(String(255)) # TODO(vish): enum? + attach_status = Column(String(255)) # TODO(vish): enum + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + + volume_type_id = Column(String(36)) + source_volid = Column(String(36)) + + +class VolumeMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for a volume.""" + __tablename__ = 'volume_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) + volume = relationship(Volume, backref="volume_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeMetadata.volume_id == Volume.id,' + 'VolumeMetadata.deleted == False)') + + +class VolumeTypes(BASE, CinderBase): + """Represent possible volume_types of volumes offered.""" + __tablename__ = "volume_types" + id = Column(String(36), primary_key=True) + name = Column(String(255)) + + volumes = relationship(Volume, + backref=backref('volume_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Volume.volume_type_id == VolumeTypes.id, ' + 'VolumeTypes.deleted == False)') + + +class VolumeTypeExtraSpecs(BASE, CinderBase): + """Represents additional specs as key/value pairs for a volume_type.""" + __tablename__ = 'volume_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_type_id = Column(String(36), + ForeignKey('volume_types.id'), + nullable=False) + volume_type = relationship( + VolumeTypes, + 
backref="extra_specs", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' + 'VolumeTypeExtraSpecs.deleted == False)' + ) + + +class VolumeGlanceMetadata(BASE, CinderBase): + """Glance metadata for a bootable volume.""" + __tablename__ = 'volume_glance_metadata' + id = Column(Integer, primary_key=True, nullable=False) + volume_id = Column(String(36), ForeignKey('volumes.id')) + snapshot_id = Column(String(36), ForeignKey('snapshots.id')) + key = Column(String(255)) + value = Column(Text) + volume = relationship(Volume, backref="volume_glance_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeGlanceMetadata.volume_id == Volume.id,' + 'VolumeGlanceMetadata.deleted == False)') + + +class Quota(BASE, CinderBase): + """Represents a single quota override for a project. + + If there is no row for a given project id and resource, then the + default for the quota class is used. If there is no row for a + given quota class and resource, then the default for the + deployment is used. If the row is present but the hard limit is + Null, then the resource is unlimited. + """ + + __tablename__ = 'quotas' + id = Column(Integer, primary_key=True) + + project_id = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class QuotaClass(BASE, CinderBase): + """Represents a single quota override for a quota class. + + If there is no row for a given quota class and resource, then the + default for the deployment is used. If the row is present but the + hard limit is Null, then the resource is unlimited. 
+ """ + + __tablename__ = 'quota_classes' + id = Column(Integer, primary_key=True) + + class_name = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class QuotaUsage(BASE, CinderBase): + """Represents the current usage for a given resource.""" + + __tablename__ = 'quota_usages' + id = Column(Integer, primary_key=True) + + project_id = Column(String(255), index=True) + resource = Column(String(255)) + + in_use = Column(Integer) + reserved = Column(Integer) + + @property + def total(self): + return self.in_use + self.reserved + + until_refresh = Column(Integer, nullable=True) + + +class Reservation(BASE, CinderBase): + """Represents a resource reservation for quotas.""" + + __tablename__ = 'reservations' + id = Column(Integer, primary_key=True) + uuid = Column(String(36), nullable=False) + + usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) + + project_id = Column(String(255), index=True) + resource = Column(String(255)) + + delta = Column(Integer) + expire = Column(DateTime, nullable=False) + + +class Snapshot(BASE, CinderBase): + """Represents a block storage device that can be attached to a VM.""" + __tablename__ = 'snapshots' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(String(36)) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + provider_location = Column(String(255)) + + volume = relationship(Volume, backref="snapshots", + foreign_keys=volume_id, + primaryjoin='and_(' + 'Snapshot.volume_id == Volume.id,' + 'Snapshot.deleted == False)') + + +class SnapshotMetadata(BASE, 
CinderBase): + """Represents a metadata key/value pair for a snapshot.""" + __tablename__ = 'snapshot_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + snapshot_id = Column(String(36), + ForeignKey('snapshots.id'), + nullable=False) + snapshot = relationship(Snapshot, backref="snapshot_metadata", + foreign_keys=snapshot_id, + primaryjoin='and_(' + 'SnapshotMetadata.snapshot_id == Snapshot.id,' + 'SnapshotMetadata.deleted == False)') + + +class IscsiTarget(BASE, CinderBase): + """Represents an iscsi target for a given host.""" + __tablename__ = 'iscsi_targets' + __table_args__ = (schema.UniqueConstraint("target_num", "host"), + {'mysql_engine': 'InnoDB'}) + id = Column(Integer, primary_key=True) + target_num = Column(Integer) + host = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, + backref=backref('iscsi_target', uselist=False), + foreign_keys=volume_id, + primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' + 'IscsiTarget.deleted==False)') + + +class Migration(BASE, CinderBase): + """Represents a running host-to-host migration.""" + __tablename__ = 'migrations' + id = Column(Integer, primary_key=True, nullable=False) + # NOTE(tr3buchet): the ____compute variables are instance['host'] + source_compute = Column(String(255)) + dest_compute = Column(String(255)) + # NOTE(tr3buchet): dest_host, btw, is an ip address + dest_host = Column(String(255)) + old_instance_type_id = Column(Integer()) + new_instance_type_id = Column(Integer()) + instance_uuid = Column(String(255), + ForeignKey('instances.uuid'), + nullable=True) + #TODO(_cerberus_): enum + status = Column(String(255)) + + +class SMFlavors(BASE, CinderBase): + """Represents a flavor for SM volumes.""" + __tablename__ = 'sm_flavors' + id = Column(Integer(), primary_key=True) + label = Column(String(255)) + description = Column(String(255)) + + +class SMBackendConf(BASE, 
CinderBase): + """Represents the connection to the backend for SM.""" + __tablename__ = 'sm_backend_config' + id = Column(Integer(), primary_key=True) + flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False) + sr_uuid = Column(String(255)) + sr_type = Column(String(255)) + config_params = Column(String(2047)) + + +class SMVolume(BASE, CinderBase): + __tablename__ = 'sm_volume' + id = Column(String(36), ForeignKey(Volume.id), primary_key=True) + backend_id = Column(Integer, ForeignKey('sm_backend_config.id'), + nullable=False) + vdi_uuid = Column(String(255)) + + +class Backup(BASE, CinderBase): + """Represents a backup of a volume to Swift.""" + __tablename__ = 'backups' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.backup_name_template % self.id + + user_id = Column(String(255), nullable=False) + project_id = Column(String(255), nullable=False) + + volume_id = Column(String(36), nullable=False) + host = Column(String(255)) + availability_zone = Column(String(255)) + display_name = Column(String(255)) + display_description = Column(String(255)) + container = Column(String(255)) + status = Column(String(255)) + fail_reason = Column(String(255)) + service_metadata = Column(String(255)) + service = Column(String(255)) + size = Column(Integer) + object_count = Column(Integer) + + +class Share(BASE, CinderBase): + """Represents an NFS and CIFS shares.""" + __tablename__ = 'shares' + + @property + def name(self): + return FLAGS.share_name_template % self.id + + id = Column(String(36), primary_key=True) + user_id = Column(String(255)) + project_id = Column(String(255)) + host = Column(String(255)) + size = Column(Integer) + availability_zone = Column(String(255)) + status = Column(String(255)) + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + display_name = Column(String(255)) + display_description = Column(String(255)) + snapshot_id = Column(String(36)) + 
share_proto = Column(String(255)) + export_location = Column(String(255)) + + +class ShareAccessMapping(BASE, CinderBase): + """Represents access to NFS.""" + STATE_NEW = 'new' + STATE_ACTIVE = 'active' + STATE_DELETING = 'deleting' + STATE_DELETED = 'deleted' + STATE_ERROR = 'error' + + __tablename__ = 'share_access_map' + id = Column(String(36), primary_key=True) + share_id = Column(String(36), ForeignKey('shares.id')) + access_type = Column(String(255)) + access_to = Column(String(255)) + state = Column(Enum(STATE_NEW, STATE_ACTIVE, + STATE_DELETING, STATE_DELETED, STATE_ERROR), + default=STATE_NEW) + + +class ShareSnapshot(BASE, CinderBase): + """Represents a snapshot of a share.""" + __tablename__ = 'share_snapshots' + + @property + def name(self): + return FLAGS.share_snapshot_name_template % self.id + + @property + def share_name(self): + return FLAGS.share_name_template % self.share_id + + id = Column(String(36), primary_key=True) + user_id = Column(String(255)) + project_id = Column(String(255)) + share_id = Column(String(36)) + status = Column(String(255)) + progress = Column(String(255)) + display_name = Column(String(255)) + display_description = Column(String(255)) + share_size = Column(Integer) + share_proto = Column(String(255)) + export_location = Column(String(255)) + share = relationship(Share, backref="snapshots", + foreign_keys=share_id, + primaryjoin='and_(' + 'ShareSnapshot.share_id == Share.id,' + 'ShareSnapshot.deleted == False)') + + +def register_models(): + """Register Models and create metadata. + + Called from cinder.db.sqlalchemy.__init__ as part of loading the driver, + it will never need to be called explicitly elsewhere unless the + connection is lost and needs to be reestablished. 
+ """ + from sqlalchemy import create_engine + models = (Backup, + Migration, + Service, + Share, + ShareAccessMapping, + ShareSnapshot, + SMBackendConf, + SMFlavors, + SMVolume, + Volume, + VolumeMetadata, + SnapshotMetadata, + VolumeTypeExtraSpecs, + VolumeTypes, + VolumeGlanceMetadata, + ) + engine = create_engine(FLAGS.sql_connection, echo=False) + for model in models: + model.metadata.create_all(engine) diff --git a/cinder/db/sqlalchemy/session.py b/cinder/db/sqlalchemy/session.py new file mode 100644 index 0000000000..cd9045efae --- /dev/null +++ b/cinder/db/sqlalchemy/session.py @@ -0,0 +1,151 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Session Handling for SQLAlchemy backend.""" + +import time + +from sqlalchemy.exc import DisconnectionError, OperationalError +import sqlalchemy.interfaces +import sqlalchemy.orm +from sqlalchemy.pool import NullPool, StaticPool + +import cinder.exception +import cinder.flags as flags +from cinder.openstack.common import log as logging + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + +_ENGINE = None +_MAKER = None + + +def get_session(autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy session.""" + global _MAKER + + if _MAKER is None: + engine = get_engine() + _MAKER = get_maker(engine, autocommit, expire_on_commit) + + session = _MAKER() + session.query = cinder.exception.wrap_db_error(session.query) + session.flush = cinder.exception.wrap_db_error(session.flush) + return session + + +def synchronous_switch_listener(dbapi_conn, connection_rec): + """Switch sqlite connections to non-synchronous mode""" + dbapi_conn.execute("PRAGMA synchronous = OFF") + + +def ping_listener(dbapi_conn, connection_rec, connection_proxy): + """ + Ensures that MySQL connections checked out of the + pool are alive. + + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + try: + dbapi_conn.cursor().execute('select 1') + except dbapi_conn.OperationalError, ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + LOG.warn(_('Got mysql server has gone away: %s'), ex) + raise DisconnectionError("Database server went away") + else: + raise + + +def is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. 
+ conn_err_codes = ('2002', '2003', '2006') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def get_engine(): + """Return a SQLAlchemy engine.""" + global _ENGINE + if _ENGINE is None: + connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection) + + engine_args = { + "pool_recycle": FLAGS.sql_idle_timeout, + "echo": False, + 'convert_unicode': True, + } + + # Map our SQL debug level to SQLAlchemy's options + if FLAGS.sql_connection_debug >= 100: + engine_args['echo'] = 'debug' + elif FLAGS.sql_connection_debug >= 50: + engine_args['echo'] = True + + if "sqlite" in connection_dict.drivername: + engine_args["poolclass"] = NullPool + + if FLAGS.sql_connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + + _ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) + + if 'mysql' in connection_dict.drivername: + sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener) + elif "sqlite" in connection_dict.drivername: + if not FLAGS.sqlite_synchronous: + sqlalchemy.event.listen(_ENGINE, 'connect', + synchronous_switch_listener) + + try: + _ENGINE.connect() + except OperationalError, e: + if not is_db_connection_error(e.args[0]): + raise + + remaining = FLAGS.sql_max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _('SQL connection failed. 
%s attempts left.') + LOG.warn(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(FLAGS.sql_retry_interval) + try: + _ENGINE.connect() + break + except OperationalError, e: + if ((remaining != 'infinite' and remaining == 0) or + not is_db_connection_error(e.args[0])): + raise + return _ENGINE + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) diff --git a/cinder/exception.py b/cinder/exception.py new file mode 100644 index 0000000000..a56282f056 --- /dev/null +++ b/cinder/exception.py @@ -0,0 +1,614 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cinder base exception handling. + +Includes decorator for re-raising Cinder-type exceptions. + +SHOULD include dedicated exception logging. 
+ +""" + +from oslo.config import cfg +import webob.exc + +from cinder import flags +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +exc_log_opts = [ + cfg.BoolOpt('fatal_exception_format_errors', + default=False, + help='make exception message format errors fatal'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(exc_log_opts) + + +class ConvertedException(webob.exc.WSGIHTTPException): + def __init__(self, code=0, title="", explanation=""): + self.code = code + self.title = title + self.explanation = explanation + super(ConvertedException, self).__init__() + + +class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = _('Unexpected error while running command.') + if exit_code is None: + exit_code = '-' + message = _('%(description)s\nCommand: %(cmd)s\n' + 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % locals() + IOError.__init__(self, message) + + +class Error(Exception): + pass + + +class DBError(Error): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(str(inner_exception)) + + +def wrap_db_error(f): + def _wrap(*args, **kwargs): + try: + return f(*args, **kwargs) + except UnicodeEncodeError: + raise InvalidUnicodeParameter() + except Exception, e: + LOG.exception(_('DB exception wrapped.')) + raise DBError(e) + _wrap.func_name = f.func_name + return _wrap + + +class CinderException(Exception): + """Base Cinder Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
+ + """ + message = _("An unknown exception occurred.") + code = 500 + headers = {} + safe = False + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.message % kwargs + + except Exception as e: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + if FLAGS.fatal_exception_format_errors: + raise e + else: + # at least get the core message out if something happened + message = self.message + + super(CinderException, self).__init__(message) + + +class GlanceConnectionFailed(CinderException): + message = _("Connection to glance failed") + ": %(reason)s" + + +class NotAuthorized(CinderException): + message = _("Not authorized.") + code = 403 + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class ImageNotAuthorized(CinderException): + message = _("Not authorized for image %(image_id)s.") + + +class Invalid(CinderException): + message = _("Unacceptable parameters.") + code = 400 + + +class InvalidSnapshot(Invalid): + message = _("Invalid snapshot") + ": %(reason)s" + + +class VolumeAttached(Invalid): + message = _("Volume %(volume_id)s is still attached, detach volume first.") + + +class SfJsonEncodeFailure(CinderException): + message = _("Failed to load data into json format") + + +class InvalidRequest(Invalid): + message = _("The request is invalid.") + + +class InvalidResults(Invalid): + message = _("The results are invalid.") + + +class InvalidInput(Invalid): + message = _("Invalid input received") + ": %(reason)s" + + +class InvalidVolumeType(Invalid): + message = 
_("Invalid volume type") + ": %(reason)s" + + +class InvalidVolume(Invalid): + message = _("Invalid volume") + ": %(reason)s" + + +class InvalidContentType(Invalid): + message = _("Invalid content type %(content_type)s.") + + +class InvalidUnicodeParameter(Invalid): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. +class InvalidParameterValue(Invalid): + message = _("%(err)s") + + +class ServiceUnavailable(Invalid): + message = _("Service is unavailable at this time.") + + +class ImageUnacceptable(Invalid): + message = _("Image %(image_id)s is unacceptable: %(reason)s") + + +class InvalidUUID(Invalid): + message = _("Expected a uuid but received %(uuid).") + + +class NotFound(CinderException): + message = _("Resource could not be found.") + code = 404 + safe = True + + +class PersistentVolumeFileNotFound(NotFound): + message = _("Volume %(volume_id)s persistence file could not be found.") + + +class VolumeNotFound(NotFound): + message = _("Volume %(volume_id)s could not be found.") + + +class SfAccountNotFound(NotFound): + message = _("Unable to locate account %(account_name)s on " + "Solidfire device") + + +class VolumeNotFoundForInstance(VolumeNotFound): + message = _("Volume not found for instance %(instance_id)s.") + + +class VolumeMetadataNotFound(NotFound): + message = _("Volume %(volume_id)s has no metadata with " + "key %(metadata_key)s.") + + +class InvalidVolumeMetadata(Invalid): + message = _("Invalid metadata") + ": %(reason)s" + + +class InvalidVolumeMetadataSize(Invalid): + message = _("Invalid metadata size") + ": %(reason)s" + + +class SnapshotMetadataNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s has no metadata with " + "key %(metadata_key)s.") + + +class InvalidSnapshotMetadata(Invalid): + message = _("Invalid metadata") + ": %(reason)s" + + +class InvalidSnapshotMetadataSize(Invalid): + message 
= _("Invalid metadata size") + ": %(reason)s" + + +class VolumeTypeNotFound(NotFound): + message = _("Volume type %(volume_type_id)s could not be found.") + + +class VolumeTypeNotFoundByName(VolumeTypeNotFound): + message = _("Volume type with name %(volume_type_name)s " + "could not be found.") + + +class VolumeTypeExtraSpecsNotFound(NotFound): + message = _("Volume Type %(volume_type_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + +class SnapshotNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s could not be found.") + + +class VolumeIsBusy(CinderException): + message = _("deleting volume %(volume_name)s that has snapshot") + + +class SnapshotIsBusy(CinderException): + message = _("deleting snapshot %(snapshot_name)s that has " + "dependent volumes") + + +class ISCSITargetNotFoundForVolume(NotFound): + message = _("No target id found for volume %(volume_id)s.") + + +class ISCSITargetCreateFailed(CinderException): + message = _("Failed to create iscsi target for volume %(volume_id)s.") + + +class ISCSITargetAttachFailed(CinderException): + message = _("Failed to attach iSCSI target for volume %(volume_id)s.") + + +class ISCSITargetRemoveFailed(CinderException): + message = _("Failed to remove iscsi target for volume %(volume_id)s.") + + +class DiskNotFound(NotFound): + message = _("No disk at %(location)s") + + +class InvalidImageRef(Invalid): + message = _("Invalid image href %(image_href)s.") + + +class ImageNotFound(NotFound): + message = _("Image %(image_id)s could not be found.") + + +class ServiceNotFound(NotFound): + message = _("Service %(service_id)s could not be found.") + + +class HostNotFound(NotFound): + message = _("Host %(host)s could not be found.") + + +class SchedulerHostFilterNotFound(NotFound): + message = _("Scheduler Host Filter %(filter_name)s could not be found.") + + +class SchedulerHostWeigherNotFound(NotFound): + message = _("Scheduler Host Weigher %(weigher_name)s could not be found.") + + +class 
HostBinaryNotFound(NotFound): + message = _("Could not find binary %(binary)s on host %(host)s.") + + +class InvalidReservationExpiration(Invalid): + message = _("Invalid reservation expiration %(expire)s.") + + +class InvalidQuotaValue(Invalid): + message = _("Change would make usage less than 0 for the following " + "resources: %(unders)s") + + +class QuotaNotFound(NotFound): + message = _("Quota could not be found") + + +class QuotaResourceUnknown(QuotaNotFound): + message = _("Unknown quota resources %(unknown)s.") + + +class ProjectQuotaNotFound(QuotaNotFound): + message = _("Quota for project %(project_id)s could not be found.") + + +class QuotaClassNotFound(QuotaNotFound): + message = _("Quota class %(class_name)s could not be found.") + + +class QuotaUsageNotFound(QuotaNotFound): + message = _("Quota usage for project %(project_id)s could not be found.") + + +class ReservationNotFound(QuotaNotFound): + message = _("Quota reservation %(uuid)s could not be found.") + + +class OverQuota(CinderException): + message = _("Quota exceeded for resources: %(overs)s") + + +class MigrationNotFound(NotFound): + message = _("Migration %(migration_id)s could not be found.") + + +class MigrationNotFoundByStatus(MigrationNotFound): + message = _("Migration not found for instance %(instance_id)s " + "with status %(status)s.") + + +class FileNotFound(NotFound): + message = _("File %(file_path)s could not be found.") + + +class ClassNotFound(NotFound): + message = _("Class %(class_name)s could not be found: %(exception)s") + + +class NotAllowed(CinderException): + message = _("Action not allowed.") + + +#TODO(bcwaldon): EOL this exception! 
+class Duplicate(CinderException): + pass + + +class KeyPairExists(Duplicate): + message = _("Key pair %(key_name)s already exists.") + + +class VolumeTypeExists(Duplicate): + message = _("Volume Type %(id)s already exists.") + + +class MigrationError(CinderException): + message = _("Migration error") + ": %(reason)s" + + +class MalformedRequestBody(CinderException): + message = _("Malformed message body: %(reason)s") + + +class ConfigNotFound(NotFound): + message = _("Could not find config at %(path)s") + + +class PasteAppNotFound(NotFound): + message = _("Could not load paste app '%(name)s' from %(path)s") + + +class NoValidHost(CinderException): + message = _("No valid host was found. %(reason)s") + + +class WillNotSchedule(CinderException): + message = _("Host %(host)s is not up or doesn't exist.") + + +class QuotaError(CinderException): + message = _("Quota exceeded") + ": code=%(code)s" + code = 413 + headers = {'Retry-After': 0} + safe = True + + +class VolumeSizeExceedsAvailableQuota(QuotaError): + message = _("Requested volume or snapshot exceeds " + "allowed Gigabytes quota") + + +class VolumeSizeExceedsQuota(QuotaError): + message = _("Maximum volume/snapshot size exceeded") + + +class VolumeLimitExceeded(QuotaError): + message = _("Maximum number of volumes allowed (%(allowed)d) exceeded") + + +class SnapshotLimitExceeded(QuotaError): + message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") + + +class DuplicateSfVolumeNames(Duplicate): + message = _("Detected more than one volume with name %(vol_name)s") + + +class Duplicate3PARHost(CinderException): + message = _("3PAR Host already exists: %(err)s. 
%(info)s") + + +class Invalid3PARDomain(CinderException): + message = _("Invalid 3PAR Domain: %(err)s") + + +class VolumeTypeCreateFailed(CinderException): + message = _("Cannot create volume_type with " + "name %(name)s and specs %(extra_specs)s") + + +class SolidFireAPIException(CinderException): + message = _("Bad response from SolidFire API") + + +class SolidFireAPIDataException(SolidFireAPIException): + message = _("Error in SolidFire API response: data=%(data)s") + + +class UnknownCmd(Invalid): + message = _("Unknown or unsupported command %(cmd)s") + + +class MalformedResponse(Invalid): + message = _("Malformed response to command %(cmd)s: %(reason)s") + + +class BadHTTPResponseStatus(CinderException): + message = _("Bad HTTP response status %(status)s") + + +class FailedCmdWithDump(CinderException): + message = _("Operation failed with status=%(status)s. Full dump: %(data)s") + + +class ZadaraServerCreateFailure(CinderException): + message = _("Unable to create server object for initiator %(name)s") + + +class ZadaraServerNotFound(NotFound): + message = _("Unable to find server object for initiator %(name)s") + + +class ZadaraVPSANoActiveController(CinderException): + message = _("Unable to find any active VPSA controller") + + +class ZadaraAttachmentsNotFound(NotFound): + message = _("Failed to retrieve attachments for volume %(name)s") + + +class ZadaraInvalidAttachmentInfo(Invalid): + message = _("Invalid attachment info for volume %(name)s: %(reason)s") + + +class InstanceNotFound(NotFound): + message = _("Instance %(instance_id)s could not be found.") + + +class VolumeBackendAPIException(CinderException): + message = _("Bad or unexpected response from the storage volume " + "backend API: %(data)s") + + +class NfsException(CinderException): + message = _("Unknown NFS exception") + + +class NfsNoSharesMounted(NotFound): + message = _("No mounted NFS shares found") + + +class NfsNoSuitableShareFound(NotFound): + message = _("There is no share which can 
host %(volume_size)sG") + + +class GlusterfsException(CinderException): + message = _("Unknown Gluster exception") + + +class GlusterfsNoSharesMounted(NotFound): + message = _("No mounted Gluster shares found") + + +class GlusterfsNoSuitableShareFound(NotFound): + message = _("There is no share which can host %(volume_size)sG") + + +class GlanceMetadataExists(Invalid): + message = _("Glance metadata cannot be updated, key %(key)s" + " exists for volume id %(volume_id)s") + + +class ImageCopyFailure(Invalid): + message = _("Failed to copy image to volume") + + +class BackupNotFound(NotFound): + message = _("Backup %(backup_id)s could not be found.") + + +class InvalidBackup(Invalid): + message = _("Invalid backup: %(reason)s") + + +class InvalidShare(CinderException): + message = _("Invalid share: %(reason)s") + + +class ShareAccessNotFound(NotFound): + message = _("Access_id %(access_id)s not found") + + +class ShareAccessExists(Duplicate): + message = _("Share access %(access_type)s:%(access)s exists") + + +class InvalidShareAccess(CinderException): + message = _("Invalid access_rule: %(reason)s") + + +class ShareIsBusy(CinderException): + message = _("Deleting $(share_name) share that used") + + +class ShareBackendException(CinderException): + message = _("Share backend error: %(msg)s") + + +class ShareSnapshotNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s could not be found.") + + +class ShareSnapshotIsBusy(CinderException): + message = _("Deleting snapshot %(snapshot_name)s that has " + "dependent shares.") + + +class InvalidShareSnapshot(CinderException): + message = _("Invalid share snapshot: %(reason)s") + + +class SwiftConnectionFailed(CinderException): + message = _("Connection to swift failed") + ": %(reason)s" diff --git a/cinder/flags.py b/cinder/flags.py new file mode 100644 index 0000000000..47daaa3ba9 --- /dev/null +++ b/cinder/flags.py @@ -0,0 +1,261 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States 
Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Command-line flag library. + +Emulates gflags by wrapping cfg.ConfigOpts. + +The idea is to move fully to cfg eventually, and this wrapper is a +stepping stone. + +""" + +import os +import socket +import sys + +from oslo.config import cfg + +from cinder import version + +FLAGS = cfg.CONF + + +def parse_args(argv, default_config_files=None): + FLAGS(argv[1:], project='cinder', + version=version.version_string(), + default_config_files=default_config_files) + + +class UnrecognizedFlag(Exception): + pass + + +def DECLARE(name, module_string, flag_values=FLAGS): + if module_string not in sys.modules: + __import__(module_string, globals(), locals()) + if name not in flag_values: + raise UnrecognizedFlag('%s not defined by %s' % (name, module_string)) + + +def _get_my_ip(): + """ + Returns the actual ip of the local machine. + + This code figures out what source address would be used if some traffic + were to be sent out to some well known address on the Internet. In this + case, a Google DNS server is used, but the specific address does not + matter much. No traffic is actually sent. 
+ """ + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.error: + return "127.0.0.1" + + +core_opts = [ + cfg.StrOpt('connection_type', + default=None, + help='Virtualization api connection type : libvirt, xenapi, ' + 'or fake'), + cfg.StrOpt('sql_connection', + default='sqlite:///$state_path/$sqlite_db', + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True), + cfg.IntOpt('sql_connection_debug', + default=0, + help='Verbosity of SQL debugging information. 0=None, ' + '100=Everything'), + cfg.StrOpt('api_paste_config', + default="api-paste.ini", + help='File name for the paste.deploy config for cinder-api'), + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory where the cinder python module is installed'), + cfg.StrOpt('bindir', + default='$pybasedir/bin', + help='Directory where cinder binaries are installed'), + cfg.StrOpt('state_path', + default='$pybasedir', + help="Top-level directory for maintaining cinder's state"), ] + +debug_opts = [ +] + +FLAGS.register_cli_opts(core_opts) +FLAGS.register_cli_opts(debug_opts) + +global_opts = [ + cfg.StrOpt('my_ip', + default=_get_my_ip(), + help='ip address of this host'), + cfg.StrOpt('glance_host', + default='$my_ip', + help='default glance hostname or ip'), + cfg.IntOpt('glance_port', + default=9292, + help='default glance port'), + cfg.ListOpt('glance_api_servers', + default=['$glance_host:$glance_port'], + help='A list of the glance api servers available to cinder ' + '([hostname|ip]:port)'), + cfg.IntOpt('glance_api_version', + default=1, + help='Version of the glance api to use'), + cfg.IntOpt('glance_num_retries', + default=0, + help='Number retries when downloading an image from glance'), + cfg.BoolOpt('glance_api_insecure', + default=False, + help='Allow to perform insecure 
SSL (https) requests to ' + 'glance'), + cfg.StrOpt('scheduler_topic', + default='cinder-scheduler', + help='the topic scheduler nodes listen on'), + cfg.StrOpt('volume_topic', + default='cinder-volume', + help='the topic volume nodes listen on'), + cfg.StrOpt('backup_topic', + default='cinder-backup', + help='the topic volume backup nodes listen on'), + cfg.StrOpt('share_topic', + default='cinder-share', + help='the topic share nodes listen on'), + cfg.BoolOpt('enable_v1_api', + default=True, + help=_("Deploy v1 of the Cinder API. ")), + cfg.BoolOpt('enable_v2_api', + default=True, + help=_("Deploy v2 of the Cinder API. ")), + cfg.BoolOpt('api_rate_limit', + default=True, + help='whether to rate limit the api'), + cfg.ListOpt('osapi_volume_ext_list', + default=[], + help='Specify list of extensions to load when using osapi_' + 'volume_extension option with cinder.api.contrib.' + 'select_extensions'), + cfg.MultiStrOpt('osapi_volume_extension', + default=['cinder.api.contrib.standard_extensions'], + help='osapi volume extension to load'), + cfg.StrOpt('osapi_volume_base_URL', + default=None, + help='Base URL that will be presented to users in links ' + 'to the OpenStack Volume API', + deprecated_name='osapi_compute_link_prefix'), + cfg.IntOpt('osapi_max_limit', + default=1000, + help='the maximum number of items returned in a single ' + 'response from a collection resource'), + cfg.StrOpt('sqlite_db', + default='cinder.sqlite', + help='the filename to use with sqlite'), + cfg.BoolOpt('sqlite_synchronous', + default=True, + help='If passed, use synchronous mode for sqlite'), + cfg.IntOpt('sql_idle_timeout', + default=3600, + help='timeout before idle sql connections are reaped'), + cfg.IntOpt('sql_max_retries', + default=10, + help='maximum db connection retries during startup. 
' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('sql_retry_interval', + default=10, + help='interval between retries of opening a sql connection'), + cfg.StrOpt('volume_manager', + default='cinder.volume.manager.VolumeManager', + help='full class name for the Manager for volume'), + cfg.StrOpt('backup_manager', + default='cinder.backup.manager.BackupManager', + help='full class name for the Manager for volume backup'), + cfg.StrOpt('scheduler_manager', + default='cinder.scheduler.manager.SchedulerManager', + help='full class name for the Manager for scheduler'), + cfg.StrOpt('share_manager', + default='cinder.share.manager.ShareManager', + help='full class name for the Manager for share'), + cfg.StrOpt('host', + default=socket.gethostname(), + help='Name of this node. This can be an opaque identifier. ' + 'It is not necessarily a hostname, FQDN, or IP address.'), + # NOTE(vish): default to nova for compatibility with nova installs + cfg.StrOpt('storage_availability_zone', + default='nova', + help='availability zone of this node'), + cfg.ListOpt('memcached_servers', + default=None, + help='Memcached servers or None for in process cache.'), + cfg.StrOpt('default_volume_type', + default=None, + help='default volume type to use'), + cfg.StrOpt('volume_usage_audit_period', + default='month', + help='time period to generate volume usages for. 
' + 'Time period must be hour, day, month or year'), + cfg.StrOpt('root_helper', + default='sudo', + help='Deprecated: command to use for running commands as root'), + cfg.StrOpt('rootwrap_config', + default=None, + help='Path to the rootwrap configuration file to use for ' + 'running commands as root'), + cfg.BoolOpt('monkey_patch', + default=False, + help='Whether to log monkey patching'), + cfg.ListOpt('monkey_patch_modules', + default=[], + help='List of modules/decorators to monkey patch'), + cfg.IntOpt('service_down_time', + default=60, + help='maximum time since last check-in for up service'), + cfg.StrOpt('volume_api_class', + default='cinder.volume.api.API', + help='The full class name of the volume API class to use'), + cfg.StrOpt('backup_api_class', + default='cinder.backup.api.API', + help='The full class name of the volume backup API class'), + cfg.StrOpt('share_api_class', + default='cinder.share.api.API', + help='The full class name of the share API class to use'), + cfg.StrOpt('auth_strategy', + default='noauth', + help='The strategy to use for auth. Supports noauth, keystone, ' + 'and deprecated.'), + cfg.ListOpt('enabled_backends', + default=None, + help='A list of backend names to use. These backend names ' + 'should be backed by a unique [CONFIG] group ' + 'with its options'), + cfg.ListOpt('enabled_share_backends', + default=None, + help='A list of share backend names to use. These backend ' + 'names should be backed by a unique [CONFIG] group ' + 'with its options'), + cfg.BoolOpt('no_snapshot_gb_quota', + default=False, + help='Whether snapshots count against GigaByte quota'), ] + +FLAGS.register_opts(global_opts) diff --git a/cinder/image/__init__.py b/cinder/image/__init__.py new file mode 100644 index 0000000000..7affa08d4c --- /dev/null +++ b/cinder/image/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/image/glance.py b/cinder/image/glance.py new file mode 100644 index 0000000000..bfec5288e7 --- /dev/null +++ b/cinder/image/glance.py @@ -0,0 +1,460 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of an image service that uses Glance as the backend""" + +from __future__ import absolute_import + +import copy +import itertools +import random +import sys +import time +import urlparse + +import glanceclient +import glanceclient.exc + +from cinder import exception +from cinder import flags +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +def _parse_image_ref(image_href): + """Parse an image href into composite parts. 
+ + :param image_href: href of an image + :returns: a tuple of the form (image_id, host, port) + :raises ValueError + + """ + url = urlparse.urlparse(image_href) + port = url.port or 80 + host = url.netloc.split(':', 1)[0] + image_id = url.path.split('/')[-1] + use_ssl = (url.scheme == 'https') + return (image_id, host, port, use_ssl) + + +def _create_glance_client(context, host, port, use_ssl, + version=FLAGS.glance_api_version): + """Instantiate a new glanceclient.Client object""" + if version is None: + version = FLAGS.glance_api_version + if use_ssl: + scheme = 'https' + else: + scheme = 'http' + params = {} + params['insecure'] = FLAGS.glance_api_insecure + if FLAGS.auth_strategy == 'keystone': + params['token'] = context.auth_token + endpoint = '%s://%s:%s' % (scheme, host, port) + return glanceclient.Client(str(version), endpoint, **params) + + +def get_api_servers(): + """ + Shuffle a list of FLAGS.glance_api_servers and return an iterator + that will cycle through the list, looping around to the beginning + if necessary. 
+ """ + api_servers = [] + for api_server in FLAGS.glance_api_servers: + if '//' not in api_server: + api_server = 'http://' + api_server + url = urlparse.urlparse(api_server) + port = url.port or 80 + host = url.netloc.split(':', 1)[0] + use_ssl = (url.scheme == 'https') + api_servers.append((host, port, use_ssl)) + random.shuffle(api_servers) + return itertools.cycle(api_servers) + + +class GlanceClientWrapper(object): + """Glance client wrapper class that implements retries.""" + + def __init__(self, context=None, host=None, port=None, use_ssl=False, + version=None): + if host is not None: + self.client = self._create_static_client(context, + host, port, + use_ssl, version) + else: + self.client = None + self.api_servers = None + self.version = version + + def _create_static_client(self, context, host, port, use_ssl, version): + """Create a client that we'll use for every call.""" + self.host = host + self.port = port + self.use_ssl = use_ssl + self.version = version + return _create_glance_client(context, + self.host, self.port, + self.use_ssl, self.version) + + def _create_onetime_client(self, context, version): + """Create a client that will be used for one call.""" + if self.api_servers is None: + self.api_servers = get_api_servers() + self.host, self.port, self.use_ssl = self.api_servers.next() + return _create_glance_client(context, + self.host, self.port, + self.use_ssl, version) + + def call(self, context, method, *args, **kwargs): + """ + Call a glance client method. If we get a connection error, + retry the request according to FLAGS.glance_num_retries. 
+ """ + version = self.version + if version in kwargs: + version = kwargs['version'] + + retry_excs = (glanceclient.exc.ServiceUnavailable, + glanceclient.exc.InvalidEndpoint, + glanceclient.exc.CommunicationError) + num_attempts = 1 + FLAGS.glance_num_retries + + for attempt in xrange(1, num_attempts + 1): + client = self.client or self._create_onetime_client(context, + version) + try: + return getattr(client.images, method)(*args, **kwargs) + except retry_excs as e: + host = self.host + port = self.port + extra = "retrying" + error_msg = _("Error contacting glance server " + "'%(host)s:%(port)s' for '%(method)s', " + "%(extra)s.") + if attempt == num_attempts: + extra = 'done trying' + LOG.exception(error_msg, locals()) + raise exception.GlanceConnectionFailed(host=host, + port=port, + reason=str(e)) + LOG.exception(error_msg, locals()) + time.sleep(1) + + +class GlanceImageService(object): + """Provides storage and retrieval of disk image objects within Glance.""" + + def __init__(self, client=None): + self._client = client or GlanceClientWrapper() + + def detail(self, context, **kwargs): + """Calls out to Glance for a list of detailed image information.""" + params = self._extract_query_params(kwargs) + try: + images = self._client.call(context, 'list', **params) + except Exception: + _reraise_translated_exception() + + _images = [] + for image in images: + if self._is_image_available(context, image): + _images.append(self._translate_from_glance(image)) + + return _images + + def _extract_query_params(self, params): + _params = {} + accepted_params = ('filters', 'marker', 'limit', + 'sort_key', 'sort_dir') + for param in accepted_params: + if param in params: + _params[param] = params.get(param) + + # ensure filters is a dict + _params.setdefault('filters', {}) + # NOTE(vish): don't filter out private images + _params['filters'].setdefault('is_public', 'none') + + return _params + + def show(self, context, image_id): + """Returns a dict with image data for the 
given opaque image id.""" + try: + image = self._client.call(context, 'get', image_id) + except Exception: + _reraise_translated_image_exception(image_id) + + if not self._is_image_available(context, image): + raise exception.ImageNotFound(image_id=image_id) + + base_image_meta = self._translate_from_glance(image) + return base_image_meta + + def get_location(self, context, image_id): + """Returns the direct url representing the backend storage location, + or None if this attribute is not shown by Glance.""" + try: + client = GlanceClientWrapper() + image_meta = client.call(context, 'get', image_id) + except Exception: + _reraise_translated_image_exception(image_id) + + if not self._is_image_available(context, image_meta): + raise exception.ImageNotFound(image_id=image_id) + + return getattr(image_meta, 'direct_url', None) + + def download(self, context, image_id, data): + """Calls out to Glance for metadata and data and writes data.""" + try: + image_chunks = self._client.call(context, 'data', image_id) + except Exception: + _reraise_translated_image_exception(image_id) + + for chunk in image_chunks: + data.write(chunk) + + def create(self, context, image_meta, data=None): + """Store the image data and return the new image object.""" + sent_service_image_meta = self._translate_to_glance(image_meta) + + if data: + sent_service_image_meta['data'] = data + + recv_service_image_meta = self._client.call(context, 'create', + **sent_service_image_meta) + + return self._translate_from_glance(recv_service_image_meta) + + def update(self, context, image_id, + image_meta, data=None, purge_props=True): + """Modify the given image with the new data.""" + image_meta = self._translate_to_glance(image_meta) + image_meta['purge_props'] = purge_props + #NOTE(bcwaldon): id is not an editable field, but it is likely to be + # passed in by calling code. Let's be nice and ignore it. 
+ image_meta.pop('id', None) + if data: + image_meta['data'] = data + try: + image_meta = self._client.call(context, 'update', image_id, + **image_meta) + except Exception: + _reraise_translated_image_exception(image_id) + else: + return self._translate_from_glance(image_meta) + + def delete(self, context, image_id): + """Delete the given image. + + :raises: ImageNotFound if the image does not exist. + :raises: NotAuthorized if the user is not an owner. + + """ + try: + self._client.call(context, 'delete', image_id) + except glanceclient.exc.NotFound: + raise exception.ImageNotFound(image_id=image_id) + return True + + @staticmethod + def _translate_to_glance(image_meta): + image_meta = _convert_to_string(image_meta) + image_meta = _remove_read_only(image_meta) + return image_meta + + @staticmethod + def _translate_from_glance(image): + image_meta = _extract_attributes(image) + image_meta = _convert_timestamps_to_datetimes(image_meta) + image_meta = _convert_from_string(image_meta) + return image_meta + + @staticmethod + def _is_image_available(context, image): + """Check image availability. + + This check is needed in case Nova and Glance are deployed + without authentication turned on. + """ + # The presence of an auth token implies this is an authenticated + # request and we need not handle the noauth use-case. 
+ if hasattr(context, 'auth_token') and context.auth_token: + return True + + if image.is_public or context.is_admin: + return True + + properties = image.properties + + if context.project_id and ('owner_id' in properties): + return str(properties['owner_id']) == str(context.project_id) + + if context.project_id and ('project_id' in properties): + return str(properties['project_id']) == str(context.project_id) + + try: + user_id = properties['user_id'] + except KeyError: + return False + + return str(user_id) == str(context.user_id) + + +def _convert_timestamps_to_datetimes(image_meta): + """Returns image with timestamp fields converted to datetime objects.""" + for attr in ['created_at', 'updated_at', 'deleted_at']: + if image_meta.get(attr): + image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) + return image_meta + + +# NOTE(bcwaldon): used to store non-string data in glance metadata +def _json_loads(properties, attr): + prop = properties[attr] + if isinstance(prop, basestring): + properties[attr] = jsonutils.loads(prop) + + +def _json_dumps(properties, attr): + prop = properties[attr] + if not isinstance(prop, basestring): + properties[attr] = jsonutils.dumps(prop) + + +_CONVERT_PROPS = ('block_device_mapping', 'mappings') + + +def _convert(method, metadata): + metadata = copy.deepcopy(metadata) + properties = metadata.get('properties') + if properties: + for attr in _CONVERT_PROPS: + if attr in properties: + method(properties, attr) + + return metadata + + +def _convert_from_string(metadata): + return _convert(_json_loads, metadata) + + +def _convert_to_string(metadata): + return _convert(_json_dumps, metadata) + + +def _extract_attributes(image): + IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', + 'container_format', 'checksum', 'id', + 'name', 'created_at', 'updated_at', + 'deleted_at', 'deleted', 'status', + 'min_disk', 'min_ram', 'is_public'] + output = {} + for attr in IMAGE_ATTRIBUTES: + output[attr] = getattr(image, attr, None) + + 
output['properties'] = getattr(image, 'properties', {}) + + return output + + +def _remove_read_only(image_meta): + IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at'] + output = copy.deepcopy(image_meta) + for attr in IMAGE_ATTRIBUTES: + if attr in output: + del output[attr] + return output + + +def _reraise_translated_image_exception(image_id): + """Transform the exception for the image but keep its traceback intact.""" + exc_type, exc_value, exc_trace = sys.exc_info() + new_exc = _translate_image_exception(image_id, exc_value) + raise new_exc, None, exc_trace + + +def _reraise_translated_exception(): + """Transform the exception but keep its traceback intact.""" + exc_type, exc_value, exc_trace = sys.exc_info() + new_exc = _translate_plain_exception(exc_value) + raise new_exc, None, exc_trace + + +def _translate_image_exception(image_id, exc_value): + if isinstance(exc_value, (glanceclient.exc.Forbidden, + glanceclient.exc.Unauthorized)): + return exception.ImageNotAuthorized(image_id=image_id) + if isinstance(exc_value, glanceclient.exc.NotFound): + return exception.ImageNotFound(image_id=image_id) + if isinstance(exc_value, glanceclient.exc.BadRequest): + return exception.Invalid(exc_value) + return exc_value + + +def _translate_plain_exception(exc_value): + if isinstance(exc_value, (glanceclient.exc.Forbidden, + glanceclient.exc.Unauthorized)): + return exception.NotAuthorized(exc_value) + if isinstance(exc_value, glanceclient.exc.NotFound): + return exception.NotFound(exc_value) + if isinstance(exc_value, glanceclient.exc.BadRequest): + return exception.Invalid(exc_value) + return exc_value + + +def get_remote_image_service(context, image_href): + """Create an image_service and parse the id from the given image_href. + + The image_href param can be an href of the form + 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3', + or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. 
If the + image_href is a standalone id, then the default image service is returned. + + :param image_href: href that describes the location of an image + :returns: a tuple of the form (image_service, image_id) + + """ + #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a + # standalone image ID + if '/' not in str(image_href): + image_service = get_default_image_service() + return image_service, image_href + + try: + (image_id, glance_host, glance_port, use_ssl) = \ + _parse_image_ref(image_href) + glance_client = GlanceClientWrapper(context=context, + host=glance_host, + port=glance_port, + use_ssl=use_ssl) + except ValueError: + raise exception.InvalidImageRef(image_href=image_href) + + image_service = GlanceImageService(client=glance_client) + return image_service, image_id + + +def get_default_image_service(): + return GlanceImageService() diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py new file mode 100644 index 0000000000..417f2b90c9 --- /dev/null +++ b/cinder/image/image_utils.py @@ -0,0 +1,283 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods to deal with images. 
 + +This is essentially a copy from nova.virt.images.py +Some slight modifications, but at some point +we should look at maybe pushing this up to OSLO +""" + +import os +import re +import tempfile + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + +image_helper_opt = [cfg.StrOpt('image_conversion_dir', + default='/tmp', + help='parent dir for tempdir used for image conversion'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(image_helper_opt) + + +class QemuImgInfo(object): + BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:" + r"\s+(.*?)\)\s*$"), re.I) + TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$") + SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I) + + def __init__(self, cmd_output): + details = self._parse(cmd_output) + self.image = details.get('image') + self.backing_file = details.get('backing_file') + self.file_format = details.get('file_format') + self.virtual_size = details.get('virtual_size') + self.cluster_size = details.get('cluster_size') + self.disk_size = details.get('disk_size') + self.snapshots = details.get('snapshot_list', []) + self.encryption = details.get('encryption') + + def __str__(self): + lines = [ + 'image: %s' % self.image, + 'file_format: %s' % self.file_format, + 'virtual_size: %s' % self.virtual_size, + 'disk_size: %s' % self.disk_size, + 'cluster_size: %s' % self.cluster_size, + 'backing_file: %s' % self.backing_file, + ] + if self.snapshots: + lines.append("snapshots: %s" % self.snapshots) + return "\n".join(lines) + + def _canonicalize(self, field): + # Standardize on underscores/lc/no dash and no spaces + # since qemu seems to have mixed outputs here... and + # this format allows for better integration with python + # - ie for usage in kwargs and such... 
 + field = field.lower().strip() + for c in (" ", "-"): + field = field.replace(c, '_') + return field + + def _extract_bytes(self, details): + # Replace it with the byte amount + real_size = self.SIZE_RE.search(details) + if real_size: + details = real_size.group(1) + try: + details = utils.to_bytes(details) + except (TypeError, ValueError): + pass + return details + + def _extract_details(self, root_cmd, root_details, lines_after): + consumed_lines = 0 + real_details = root_details + if root_cmd == 'backing_file': + # Replace it with the real backing file + backing_match = self.BACKING_FILE_RE.match(root_details) + if backing_match: + real_details = backing_match.group(2).strip() + elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']: + # Replace it with the byte amount (if we can convert it) + real_details = self._extract_bytes(root_details) + elif root_cmd == 'file_format': + real_details = real_details.strip().lower() + elif root_cmd == 'snapshot_list': + # Next line should be a header, starting with 'ID' + if not lines_after or not lines_after[0].startswith("ID"): + msg = _("Snapshot list encountered but no header found!") + raise ValueError(msg) + consumed_lines += 1 + possible_contents = lines_after[1:] + real_details = [] + # This is the sprintf pattern we will try to match + # "%-10s%-20s%7s%20s%15s" + # ID TAG VM SIZE DATE VM CLOCK (current header) + for line in possible_contents: + line_pieces = line.split(None) + if len(line_pieces) != 6: + break + else: + # Check against this pattern occurring in the final position + # "%02d:%02d:%02d.%03d" + date_pieces = line_pieces[5].split(":") + if len(date_pieces) != 3: + break + real_details.append({ + 'id': line_pieces[0], + 'tag': line_pieces[1], + 'vm_size': line_pieces[2], + 'date': line_pieces[3], + 'vm_clock': line_pieces[4] + " " + line_pieces[5], + }) + consumed_lines += 1 + return (real_details, consumed_lines) + + def _parse(self, cmd_output): + # Analysis done of qemu-img.c to figure out what 
is going on here + # Find all points start with some chars and then a ':' then a newline + # and then handle the results of those 'top level' items in a separate + # function. + # + # TODO(harlowja): newer versions might have a json output format + # we should switch to that whenever possible. + # see: http://bit.ly/XLJXDX + if not cmd_output: + cmd_output = '' + contents = {} + lines = cmd_output.splitlines() + i = 0 + line_am = len(lines) + while i < line_am: + line = lines[i] + if not line.strip(): + i += 1 + continue + consumed_lines = 0 + top_level = self.TOP_LEVEL_RE.match(line) + if top_level: + root = self._canonicalize(top_level.group(1)) + if not root: + i += 1 + continue + root_details = top_level.group(2).strip() + details, consumed_lines = self._extract_details(root, + root_details, + lines[i + 1:]) + contents[root] = details + i += consumed_lines + 1 + return contents + + +def qemu_img_info(path): + """Return a object containing the parsed output from qemu-img info.""" + out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C', + 'qemu-img', 'info', path, + run_as_root=True) + return QemuImgInfo(out) + + +def convert_image(source, dest, out_format): + """Convert image to other format""" + cmd = ('qemu-img', 'convert', '-O', out_format, source, dest) + utils.execute(*cmd, run_as_root=True) + + +def fetch(context, image_service, image_id, path, _user_id, _project_id): + # TODO(vish): Improve context handling and add owner and auth data + # when it is added to glance. Right now there is no + # auth checking in glance, so we assume that access was + # checked before we got here. 
+ with utils.remove_path_on_error(path): + with open(path, "wb") as image_file: + image_service.download(context, image_id, image_file) + + +def fetch_to_raw(context, image_service, + image_id, dest, + user_id=None, project_id=None): + if (FLAGS.image_conversion_dir and not + os.path.exists(FLAGS.image_conversion_dir)): + os.makedirs(FLAGS.image_conversion_dir) + + # NOTE(avishay): I'm not crazy about creating temp files which may be + # large and cause disk full errors which would confuse users. + # Unfortunately it seems that you can't pipe to 'qemu-img convert' because + # it seeks. Maybe we can think of something for a future version. + fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir) + os.close(fd) + with utils.remove_path_on_error(tmp): + fetch(context, image_service, image_id, tmp, user_id, project_id) + + data = qemu_img_info(tmp) + fmt = data.file_format + if fmt is None: + raise exception.ImageUnacceptable( + reason=_("'qemu-img info' parsing failed."), + image_id=image_id) + + backing_file = data.backing_file + if backing_file is not None: + raise exception.ImageUnacceptable( + image_id=image_id, + reason=_("fmt=%(fmt)s backed by:" + "%(backing_file)s") % locals()) + + # NOTE(jdg): I'm using qemu-img convert to write + # to the volume regardless if it *needs* conversion or not + # TODO(avishay): We can speed this up by checking if the image is raw + # and if so, writing directly to the device. However, we need to keep + # check via 'qemu-img info' that what we copied was in fact a raw + # image and not a different format with a backing file, which may be + # malicious. 
+ LOG.debug("%s was %s, converting to raw" % (image_id, fmt)) + convert_image(tmp, dest, 'raw') + + data = qemu_img_info(dest) + if data.file_format != "raw": + raise exception.ImageUnacceptable( + image_id=image_id, + reason=_("Converted to raw, but format is now %s") % + data.file_format) + os.unlink(tmp) + + +def upload_volume(context, image_service, image_meta, volume_path): + image_id = image_meta['id'] + if (image_meta['disk_format'] == 'raw'): + LOG.debug("%s was raw, no need to convert to %s" % + (image_id, image_meta['disk_format'])) + with utils.temporary_chown(volume_path): + with utils.file_open(volume_path) as image_file: + image_service.update(context, image_id, {}, image_file) + return + + if (FLAGS.image_conversion_dir and not + os.path.exists(FLAGS.image_conversion_dir)): + os.makedirs(FLAGS.image_conversion_dir) + + fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir) + os.close(fd) + with utils.remove_path_on_error(tmp): + LOG.debug("%s was raw, converting to %s" % + (image_id, image_meta['disk_format'])) + convert_image(volume_path, tmp, image_meta['disk_format']) + + data = qemu_img_info(tmp) + if data.file_format != image_meta['disk_format']: + raise exception.ImageUnacceptable( + image_id=image_id, + reason=_("Converted to %(f1)s, but format is now %(f2)s") % + {'f1': image_meta['disk_format'], 'f2': data.file_format}) + + with utils.file_open(tmp) as image_file: + image_service.update(context, image_id, {}, image_file) + os.unlink(tmp) diff --git a/cinder/locale/bg_BG/LC_MESSAGES/cinder.po b/cinder/locale/bg_BG/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..99d199eec5 --- /dev/null +++ b/cinder/locale/bg_BG/LC_MESSAGES/cinder.po @@ -0,0 +1,5574 @@ +# Bulgarian (Bulgaria) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Bulgarian (Bulgaria) " +"(http://www.transifex.com/projects/p/openstack/language/bg_BG/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." 
+msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." 
+msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/bs/LC_MESSAGES/cinder.po b/cinder/locale/bs/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..f6c4550d49 --- /dev/null +++ b/cinder/locale/bs/LC_MESSAGES/cinder.po @@ -0,0 +1,5581 @@ +# Bosnian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-01-19 20:22+0000\n" +"Last-Translator: yazar \n" +"Language-Team: Bosnian \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Neočekivana greška prilikom pokretanja komande." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, 
python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " 
+"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/cinder.pot b/cinder/locale/cinder.pot new file mode 100644 index 0000000000..1ab42f6799 --- /dev/null +++ b/cinder/locale/cinder.pot @@ -0,0 +1,5573 @@ +# Translations template for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# FIRST AUTHOR , 2013. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: cinder jenkins.cinder.propose.translation.update.5\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/cs/LC_MESSAGES/cinder.po b/cinder/locale/cs/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..f733b85c1f --- /dev/null +++ b/cinder/locale/cs/LC_MESSAGES/cinder.po @@ -0,0 +1,5631 @@ +# Czech translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-04-04 20:28+0000\n" +"Last-Translator: Zbyněk Schwarz \n" +"Language-Team: Czech \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Při spuštění příkazu došlo k nečekané chybě." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Příkaz: %(cmd)s\n" +"Kód ukončení: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "Vyjímka DB zabalena." + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "Vyskytla se neočekávaná výjimka." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Připojení k glance selhalo" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Neschváleno." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "Uživatel nemá správcovská oprávnění" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Pravidla nedovolují, aby bylo %(action)s provedeno." + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Kernel nenalezen v obrazu %(image_id)s." + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Nepřijatelné parametry." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Neplatný snímek" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Svazek %(volume_id)s není k ničemu připojen" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Nelze načíst data do formátu json" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "Požadavek je neplatný." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "Požadavek je neplatný." 
+ +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "Obdržen neplatný vstup" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Neplatný typ svazku" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Neplatný svazek" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Neplatný typ obsahu %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "Služba je v tuto chvíli nedostupná." + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "Zdroj nemohl být nalezen." + +#: cinder/exception.py:229 +#, fuzzy, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "Svazek %(volume_id)s nemohl být nastaven." + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "Svazek %(volume_id)s nemohl být nastaven." + +#: cinder/exception.py:237 +#, fuzzy, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "Nelze nalézt účet %(account_name) on zařízení Solidfire" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "Svazek není nalezen v instanci %(instance_id)s." + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "Svazek %(volume_id)s nemá žádná metadata s klíčem %(metadata_key)s." 
+ +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "Neplatná metadata" + +#: cinder/exception.py:255 cinder/exception.py:268 +#, fuzzy +msgid "Invalid metadata size" +msgstr "Neplatný klíč metadata" + +#: cinder/exception.py:259 +#, fuzzy, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "Instance %(instance_id)s nemá žádná metadata s klíčem %(metadata_key)s." + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "Typ svazku %(volume_type_id)s nemohl být nalezen." + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "Typ svazku s názvem %(volume_type_name)s nemohl být nalezen." + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" +"Typ svazku %(volume_type_id)s nemá žádné dodatečné parametry s klíčem " +"%(extra_specs_key)s." + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "Snímek %(snapshot_id)s nemohl být nalezen." + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "mazání svazku %(volume_name)s který má snímek" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/exception.py:307 +#, fuzzy, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." 
+msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "Źádný disk ve %(location)s" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "Neplatný href %(image_href)s obrazu." + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "Obraz %(image_id)s nemohl být nalezen." + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "Služba %(service_id)s nemohla být nalezena." + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "Hostitel %(host)s nemohl být nalezen." + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "Filtr hostitelů plácinderče %(filter_name)s nemohl být nalezen." + +#: cinder/exception.py:339 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Filtr hostitelů plácinderče %(filter_name)s nemohl být nalezen." + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "Nelze najít binární soubor %(binary)s v hostiteli %(host)s." + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "Kvóta nemohla být nalezena." + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." + +#: cinder/exception.py:368 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s" + +#: cinder/exception.py:372 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." + +#: cinder/exception.py:376 +#, fuzzy, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Uživatel %(user_id)s nemohl být nalezen." + +#: cinder/exception.py:380 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Kvóta překročena" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "Přesun %(migration_id)s nemohl být nalezen." + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "Přesun nenalezen v instanci %(instance_id)s se stavem %(status)s." + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "Soubor %(file_path)s nemohl být nalezen." + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "Činnost není povolena." + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "Dvojice klíčů %(key_name)s již existuje." + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Typ svazku %(name)s již existuje." 
+ +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "Chyba přesunu" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "Poškozené tělo zprávy: %(reason)s" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "Nelze najít nastavení v %(path)s" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "Nelze načíst aplikaci vložení '%(name)s' z %(path)s" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Hostitel %(host)s není dostupný nebo neexistuje." + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "Kvóta překročena" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, fuzzy, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "Zjištěn více než jeden svazek s názvem %(vol_name)" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" +"Nelze vytvořit typ_svazku s názvem %(name)s a specifikacemi " +"%(extra_specs)s" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "Špatná odpověď od SolidFire API" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "Chyba v odpovědi SolidFire API: data=%(data)s" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, fuzzy, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Poškozené tělo zprávy: %(reason)s" + +#: cinder/exception.py:499 +#, fuzzy, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Neplatný stav serveru: %(status)s" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "Instance %(instance_id)s nemohla být nastavena." 
+ +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Vyskytla se neočekávaná výjimka." + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Vyskytla se neočekávaná výjimka." + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, fuzzy, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "Skupina LDAP %(group_id)s nemohla být nalezena." + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Neplatná podpůrná vrstva: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Připojení k glance selhalo" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. 
" +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" +"Přeskakování %(full_task_name)s, zbývá %(ticks_to_skip)s tiků do dalšího " +"spuštění" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Spuštění pravidelné úlohy %(full_task_name)s" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Chyba při %(full_task_name)s: %(e)s" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." +msgstr "Oznamování schopností plácinderčům ..." + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "Soubor JSON představující zásady" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "Kontrolované pravidlo, když požadované není nalezeno" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Vnitřní výjimka: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: 
cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "Vyvoláno Nenalezeno: %s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Začínající uzel %(topic)s (verze %(vcs_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "Vytváření připojení zákazníka pro službu %s" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Ukončena služba bez záznamu v databázi" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "Objekt databáze služby zmizel, je znovu vytvářen." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Obnoveno připojení modelového serveru!" 
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "modelový server je nedostupný" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Úplná sada PŘÍZNAKŮ:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "%(flag)s : SADA PŽÍZNAKŮ " + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Získávání %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Získány neznámé argumenty klíčového slova pro utils.execute: %r" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Spouštění příkazu (podproces): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Výsledek byl %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r selhalo. Opakování." 
+ +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Spouštění příkazu (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "Prostředí není podporováno přes SSH" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "process_input není podporován přes SSH" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "ladění ve zpětném volání: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Adresa místního spojení nenalezena.: %s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Nelze získat IP místního spojení %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Neplatná podpůrná vrstva: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "podpůrná vrstva: %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "v opakujícím volání" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "Očekáván objekt typu: %s" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc: '%(name)s' trvalo %(total_time).2f sek" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to 
find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, fuzzy, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "%(name)s spuštěno v %(host)s:%(port)s" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Zastavování serveru WSGI." + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "Server WSGI byl zastaven." + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "Musíte zavést __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "parametr limit musí být celé číslo" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "parametr limit musí být kladný" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "parametr offset musí být celé číslo" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "parametr offset musí být kladný" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "značka [%s] nenalezena" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s neobsahuje verzi" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "Zavádění správce rozšíření." + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "Načteno rozšíření: %s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "Název roz: %s" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "Přezdívká roz: %s" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "Popis roz: %s" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "Jmenný prostor roz: %s" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "Roz aktualizováno: %s" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "Výjimka při načítání rozšíření: %s" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "Načítání rozšíření %s" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "Volání továrny rozšíření %s" + +#: cinder/api/extensions.py:278 +#, python-format 
+msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Nelze načít rozšížení %(ext_factory)s: %(exc)s" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "Nelze načíst rozšíření %(classpath)s: %(exc)s" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Nelze načíst rozšíření %(ext_name)s: %(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "prvek není podřazený" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "kořenový prvek volí seznam" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" +"Neshoda stromu šablony; přidávání sluhy %(slavetag)s k pánovi " +"%(mastertag)s" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "podtřídy musí zavádět construct()!" 
+ +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "Nsprávný formát těla požadavku" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +#, fuzzy +msgid "Snapshot not found." +msgstr "Server nenalezen." + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "XML nelze porozumět" + +#: cinder/api/contrib/hosts.py:133 +#, fuzzy, python-format +msgid "Host '%s' could not be found." +msgstr "Hostitel %(host)s nemohl být nalezen." + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "Neplatný stav: '%s'" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "Neplatné nastavení aktualizace: '%s'" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "Nastavování hostitele %(host)s na %(state)s." 
+
+#: cinder/api/contrib/hosts.py:202
+msgid "Describe-resource is admin only functionality"
+msgstr "Describe-resource je funkce pouze pro správce"
+
+#: cinder/api/contrib/hosts.py:210
+msgid "Host not found"
+msgstr ""
+
+#: cinder/api/contrib/quotas.py:63
+msgid "Quota limit must be -1 or greater."
+msgstr ""
+
+#: cinder/api/contrib/types_extra_specs.py:102
+#, fuzzy
+msgid "Request body empty"
+msgstr "Nesprávný formát těla požadavku"
+
+#: cinder/api/contrib/types_extra_specs.py:106
+#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77
+#: cinder/api/v2/snapshot_metadata.py:77
+msgid "Request body and URI mismatch"
+msgstr "Neshoda s tělem požadavku a URI"
+
+#: cinder/api/contrib/types_extra_specs.py:109
+#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81
+#: cinder/api/v2/snapshot_metadata.py:81
+msgid "Request body contains too many items"
+msgstr "Tělo požadavku obsahuje příliš mnoho položek"
+
+#: cinder/api/contrib/volume_actions.py:159
+msgid "Invalid request body"
+msgstr "Neplatné tělo požadavku"
+
+#: cinder/api/contrib/volume_actions.py:163
+msgid "No image_name was specified in request."
+msgstr ""
+
+#: cinder/api/middleware/fault.py:45
+#, python-format
+msgid "Caught error: %s"
+msgstr "Zachycena chyba: %s"
+
+#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s vrácena s HTTP %(status)d"
+
+#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63
+#: cinder/api/middleware/sizelimit.py:77
+msgid "Request is too large."
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "Musí být určena třída ExtensionManager" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "Rozšířený zdroj: %s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "Rozšíření %(ext_name)s: nelze rozšířit %(collection)s: Žádný takový zdroj" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Rozšíření %(ext_name)s: rozšiřování zdroje %(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "JSON nelze porozumět" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "příliš mnoho klíčů těla" + +#: cinder/api/openstack/wsgi.py:581 +#, fuzzy, python-format +msgid "Exception handling resource: %s" +msgstr "Rozšířený zdroj: %s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "Vyvolána chyba: %s" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Vyvolána výjimka HTTP: %s" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "V požadavku zadán nerozpoznaný Content-Type" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "V požadavku nezadán Content-Type" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "V požadavku zadáno prázdné tělo" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "Žádná taková činnost: %s" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "Poškozené tělo požadavku" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "Nepodporovaný Content-Type" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "Poškozená url požadavku" + +#: 
cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s vrátilo chybu: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Pouze %(value)s požadavky %(verb)s mohou být provedeny pro %(uri)s " +"každých %(unit_string)s." + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "Tento požadavek má omezen množství." + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "Instance neexistuje" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "Položka metadat nenalezena" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. 
" +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "Server neexistuje" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "Zadáno neplatné imageRef." + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Vytvořit svazek o %s GB" + +#: cinder/api/v1/volumes.py:418 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Odstraňování voleb '%(unk_opt_str)s' z fronty" + +#: cinder/api/v2/volumes.py:359 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "Odstraňování voleb '%(unk_opt_str)s' z fronty" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "Instance neexistuje" + +#: 
cinder/backup/services/swift.py:127 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "Instance neexistuje" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "Očekáván objekt typu: %s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "Požadavek je neplatný." 
+ +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however 
current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Nelze použít globální roli %(role_id)s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "Původní výjimka je zahozena" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, fuzzy, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Získán semafor \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:199 +#, fuzzy, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Pokus o získání zámku souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:226 +#, fuzzy, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Získán zámek souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:234 +#, fuzzy, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "Získán zámek souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "zařízení záznamu systému musí být jedno z: %s" + +#: cinder/openstack/common/log.py:537 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Třída %(fullname)s je zastaralá: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +#, fuzzy +msgid "in fixed duration looping call" +msgstr "v opakujícím volání" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +#, fuzzy +msgid "in dynamic looping call" +msgstr "v opakujícím volání" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Úplná sada PŘÍZNAKŮ:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "rozbalený kontext: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "pro zprávu není metoda: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Pro zprávu není metoda: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID je %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Vyskytla se neočekávaná výjimka." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Neplatné znovu použití připojení RPC." + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Volajícímu je vrácena výjimka: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, fuzzy, python-format +msgid "Deserializing: %s" +msgstr "Popis roz: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "rozbalený kontext: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Zrušení registrace obrazu %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "Požadavek je neplatný." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +#, fuzzy +msgid "Could not find another host" +msgstr "Nelze najít %s v požadavku." + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Neplatný snímek" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Odpojit svazek %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 
cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Obraz musí být dostupný" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Obraz musí být dostupný" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for 
%(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Obraz musí být dostupný" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s." + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "Vytvořit svazek ze snímku %s" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Zachycena chyba: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 
cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB 
error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, fuzzy, python-format +msgid "Message : %(message)s" +msgstr "%(code)s: %(message)s" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Nelze použít globální roli %(role_id)s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +#, fuzzy +msgid "JSON Error" +msgstr "Chyba přesunu" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "odpověď %s" + +#: cinder/volume/drivers/coraid.py:199 +#, fuzzy, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." 
+ +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Nelze restartovat instanci" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, fuzzy, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "Vytvořit svazek ze snímku %s" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "Výjimka při načítání rozšíření: %s" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "skupina %s již existuje" + +#: cinder/volume/drivers/lvm.py:82 +#, 
python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, fuzzy, python-format +msgid "Symbolic link %s not found" +msgstr "značka [%s] nenalezena" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Neplatný snímek" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: 
cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "odpověď %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Nelze vytvořit typ instance" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "Instance neexistuje" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, fuzzy, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "Nastavování hostitele %(host)s na %(state)s." + +#: cinder/volume/drivers/storwize_svc.py:574 +#, fuzzy, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "mazání svazku %(volume_name)s který má snímek" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Příkaz: %(cmd)s\n" +"Kód ukončení: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: 
cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Instance nenalezena" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, fuzzy, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Svazek %(volume_id)s nemohl být nastaven." + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +#, fuzzy +msgid "Entering create_volume_from_snapshot." +msgstr "Vytvořit svazek ze snímku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, fuzzy, python-format +msgid "Delete Volume: %(volume)s" +msgstr "mazání svazku %(volume_name)s který má snímek" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Chyba v přesunu %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +#, fuzzy +msgid "Storage type not found." +msgstr "Obraz nenalezen" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +#, fuzzy +msgid "Masking View not found." 
+msgstr "Obraz nenalezen" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +#, fuzzy +msgid "Ecom user not found." +msgstr "Server nenalezen." + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +#, fuzzy +msgid "Ecom server not found." +msgstr "Server nenalezen." + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Znovu připojeno k frontě" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Role %(role_id)s nemohla být nalezena." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, fuzzy, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "Svazek %(volume_id)s nemohl být nastaven." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Svazek není nalezen v instanci %(instance_id)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, fuzzy, python-format +msgid "Error finding %s." +msgstr "Chyba v přesunu %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "Vyvoláno Nenalezeno: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, fuzzy, python-format +msgid "create_export: volume name:%s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, fuzzy, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "Vyvoláno Nenalezeno: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Znovu připojeno k frontě" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "Vyvoláno Nenalezeno: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get 
LUN target details for the LUN %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s." + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +#, fuzzy +msgid "Bad response from server" +msgstr "Špatná odpověď od SolidFire API" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "odpověď %s" + +#: 
cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "skupina %s již existuje" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/da/LC_MESSAGES/cinder.po b/cinder/locale/da/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..cb3418a31a --- /dev/null +++ b/cinder/locale/da/LC_MESSAGES/cinder.po @@ -0,0 +1,5573 @@ +# Danish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-01-15 21:46+0000\n" +"Last-Translator: Soren Hansen \n" +"Language-Team: Danish \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" 
+ +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/de/LC_MESSAGES/cinder.po b/cinder/locale/de/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..d0ded78bb7 --- /dev/null +++ b/cinder/locale/de/LC_MESSAGES/cinder.po @@ -0,0 +1,5584 @@ +# German translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-08-23 11:23+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: German \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Befehl: %(cmd)s\n" +"Exit-Code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." 
+msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "Das Service-Datenbank-Objekt ist verschwunden, es wird erneut erzeugt." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Alle vorhandenen FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Hole %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Führe Kommando (subprocess) aus: %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Ergebnis war %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" 
+msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Nicht möglich volume %s zu finden" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Nicht möglich volume %s zu finden" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Nicht möglich volume %s zu finden" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin 
provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Alle vorhandenen FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID ist %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Volume %s: wird entfernt" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: 
cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" 
+msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "Volume %s: erstelle Export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "Volume %s: erstelle Export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "Volume %s: wird erstellt" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "Volume %s: wird erstellt" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "Volume %s: erstelle Export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "Volume %s: wird erstellt" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "Volume %s: erfolgreich erstellt" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: 
cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "Volume %s: wird entfernt" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "Volume %s: entferne Export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + 
+#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/coraid.py:321 +#, 
fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Volume %(volname)s konnte nicht gelöscht werden" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete."
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "Image %s konnte nicht gelesen werden" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#:
cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Befehl: %(cmd)s\n" +"Exit-Code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: 
cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." 
+msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/en_AU/LC_MESSAGES/cinder.po b/cinder/locale/en_AU/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..edf412a44e --- /dev/null +++ b/cinder/locale/en_AU/LC_MESSAGES/cinder.po @@ -0,0 +1,5605 @@ +# English (Australia) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-10-21 11:27+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: English (Australia) \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "no method for message: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." 
+msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Tried to remove non-existent console %(console_id)s." + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Tried to remove non-existent console %(console_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "group %s already exists" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backend: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Connection to libvirt broke" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Inner Exception: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "start address" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "NotFound raised: %s" + +#: cinder/service.py:310 
cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Starting %(topic)s node (version %(vcs_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Service killed that has no database entry" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "The service database object disappeared, Recreating it." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recovered model server connection!" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "model server went away" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Connecting to libvirt: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" 
+msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "You must implement __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 
+#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Secret Key change for user %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Caught error: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instance %s: snapshotting" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "volume group %s doesn't exist" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume of %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "volume group %s doesn't exist" + +#: 
cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Tried to remove non-existent console %(console_id)s." + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Rebooting instance %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Unable to detach volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to 
find SR from VBD %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Got exception: %s" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "received %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "unpacked context: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "received %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "no method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "No method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "unpacked context: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "De-registering image %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "received %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake does not have an implementation for %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, 
python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Detach volume %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: 
cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to 
create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recovering from a failed execute. Try number %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating logical volume of size %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Caught error: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr 
"" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume is not local to this node" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: snapshotting" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, 
python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "response %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Going to start terminating instances" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "Nested return %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "group %s already exists" + +#: 
cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Create volume of %s GB" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd has no pool %s" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "instance %s: snapshotting" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog is not working" + +#: 
cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "response %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Going to start terminating instances" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: skipping export" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Deleting user %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr 
"" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: creating logical volume of size %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Error starting xvp: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconnected to queue" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "NotFound raised: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Running instances: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "NotFound raised: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconnected to queue" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound raised: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "Nested return %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format 
+msgid "Failed to get LUN target details for the LUN %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Running instances: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "response %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, 
python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "group %s already exists" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/en_GB/LC_MESSAGES/cinder.po b/cinder/locale/en_GB/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..0e31102dab --- /dev/null +++ b/cinder/locale/en_GB/LC_MESSAGES/cinder.po @@ -0,0 +1,5605 @@ +# English (United Kingdom) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-03-30 11:10+0000\n" +"Last-Translator: Anthony Harrington \n" +"Language-Team: English (United Kingdom) \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "An unknown exception occurred." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Connection to glance failed" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Not authorised." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "User does not have admin privileges" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Policy doesn't allow %(action)s to be performed." + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Unacceptable parameters." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Invalid snapshot" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Volume %(volume_id)s is not attached to anything" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Failed to load data into json format" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "The request is invalid." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "The request is invalid." 
+ +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "Invalid input received" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Invalid volume type" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Invalid volume" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Invalid content type %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." 
+msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." 
+msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "An unknown exception occurred." 
+ +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "An unknown exception occurred." + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backend: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Connection to glance failed" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Inner Exception: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, 
python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Full set of FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Unable to destroy VBD %s" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to locate volume %s" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr 
"Unable to destroy VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instance %s: snapshotting" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, 
python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "The request is invalid." 
+ +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Rebooting instance %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Unable to detach volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to detach volume %s" + +#: cinder/brick/local_dev/lvm.py:284 
+msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Unable to detach volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Full set of FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "An unknown exception occurred." 
+ +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Invalid reuse of an RPC connection." + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "Received %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "The request is invalid." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake does not have an implementation for %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Invalid snapshot" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "volume %s: deleting" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 
cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota 
exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: 
cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume is not local to this node" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: snapshotting" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format 
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Unable to detach volume %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + 
+#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "Nested return %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s 
not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Invalid snapshot" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" 
+ +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Failed to decrypt text" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information."
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: skipping export" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating."
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: 
cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Link Local address is not found.:%s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." 
+msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "Nested return %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid 
"Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/en_US/LC_MESSAGES/cinder.po b/cinder/locale/en_US/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..52a35261c4 --- /dev/null +++ b/cinder/locale/en_US/LC_MESSAGES/cinder.po @@ -0,0 +1,5977 @@ +# English (United States) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: markmc \n" +"Language-Team: en_US \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "Arguments dropped when creating context: %s" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "read_deleted can only be one of 'no', 'yes' or 'only', not %r" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "An unknown exception occurred." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "Exception in string format operation" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Connection to glance failed" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Not authorized." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "User does not have admin privileges" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Policy doesn't allow %(action)s to be performed." + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Not authorized for image %(image_id)s." + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Unacceptable parameters." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Invalid snapshot" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Volume %(volume_id)s is still attached, detach volume first." + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Failed to load data into json format" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "The request is invalid." + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "The results are invalid." 
+ +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "Invalid input received" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Invalid volume type" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Invalid volume" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Invalid content type %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "Invalid Parameter: Unicode is not supported by the current database." + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "Service is unavailable at this time." + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "Image %(image_id)s is unacceptable: %(reason)s" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "Expected a uuid but received %(uuid)s." + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "Resource could not be found." + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "Volume %(volume_id)s persistence file could not be found." + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "Volume %(volume_id)s could not be found." + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "Unable to locate account %(account_name)s on Solidfire device" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "Volume not found for instance %(instance_id)s." + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgstr "Volume %(volume_id)s has no metadata with key %(metadata_key)s." + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "Invalid metadata" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "Invalid metadata size" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "Volume type %(volume_type_id)s could not be found." + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "Volume type with name %(volume_type_name)s could not be found." + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "Snapshot %(snapshot_id)s could not be found." + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "deleting volume %(volume_name)s that has snapshot" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "deleting snapshot %(snapshot_name)s that has dependent volumes" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "No target id found for volume %(volume_id)s." + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Failed to create iscsi target for volume %(volume_id)s." 
+ +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "Failed to attach iSCSI target for volume %(volume_id)s." + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Failed to remove iscsi target for volume %(volume_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "No disk at %(location)s" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "Invalid image href %(image_href)s." + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "Image %(image_id)s could not be found." + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "Service %(service_id)s could not be found." + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "Host %(host)s could not be found." + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "Scheduler Host Filter %(filter_name)s could not be found." + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Scheduler Host Weigher %(weigher_name)s could not be found." + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "Could not find binary %(binary)s on host %(host)s." + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "Invalid reservation expiration %(expire)s." 
+ +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "Quota could not be found" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "Unknown quota resources %(unknown)s." + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "Quota for project %(project_id)s could not be found." + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Quota class %(class_name)s could not be found." + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Quota usage for project %(project_id)s could not be found." + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Quota reservation %(uuid)s could not be found." + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Quota exceeded for resources: %(overs)s" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "Migration %(migration_id)s could not be found." + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "Migration not found for instance %(instance_id)s with status %(status)s." + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "File %(file_path)s could not be found." 
+ +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "Class %(class_name)s could not be found: %(exception)s" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "Action not allowed." + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "Key pair %(key_name)s already exists." + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Volume Type %(id)s already exists." + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "Migration error" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "Malformed message body: %(reason)s" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "Could not find config at %(path)s" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "Could not load paste app '%(name)s' from %(path)s" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "No valid host was found. %(reason)s" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Host %(host)s is not up or doesn't exist." 
+ +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "Quota exceeded" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "Requested volume or snapshot exceeds allowed Gigabytes quota" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "Maximum volume/snapshot size exceeded" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "Maximum number of volumes allowed (%(allowed)d) exceeded" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "Maximum number of snapshots allowed (%(allowed)d) exceeded" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "Detected more than one volume with name %(vol_name)s" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "3PAR Host already exists: %(err)s. 
%(info)s" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "Invalid 3PAR Domain: %(err)s" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "Bad response from SolidFire API" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "Error in SolidFire API response: data=%(data)s" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "Unknown or unsupported command %(cmd)s" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Malformed response to command %(cmd)s: %(reason)s" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Bad HTTP response status %(status)s" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "Operation failed with status=%(status)s. 
Full dump: %(data)s" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create server object for initiator %(name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "Unable to find server object for initiator %(name)s" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "Unable to find any active VPSA controller" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "Failed to retrieve attachments for volume %(name)s" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "Invalid attachment info for volume %(name)s: %(reason)s" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "Instance %(instance_id)s could not be found." 
+ +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "Bad or unexpected response from the storage volume backend API: %(data)s" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "Unknown NFS exception" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "No mounted NFS shares found" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "There is no share which can host %(volume_size)sG" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "Unknown Gluster exception" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "No mounted Gluster shares found" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "Failed to copy image to volume" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "Backup %(backup_id)s could not be found." + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backup: %(reason)s" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "Connection to swift failed" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "Deploy v1 of the Cinder API. " + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "Deploy v2 of the Cinder API. 
" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Running periodic task %(full_task_name)s" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Error during %(full_task_name)s: %(e)s" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." +msgstr "Notifying Schedulers of capabilities ..." + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "JSON file representing policy" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "Rule checked when requested rule is not found" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "Created reservations %(reservations)s" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "Failed to commit reservations %(reservations)s" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "Failed to roll back reservations %(reservations)s" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "SIGTERM received" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid 
"Unhandled exception" +msgstr "Unhandled exception" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "Child %(pid)d exited with status %(code)d" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "pid %d not in child list" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "_wait_child %d" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "wait wrap.failed %s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Caught %s, stopping children" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Starting %(topic)s node (version %(version_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "Creating Consumer connection for Service %s" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Service killed that has no database entry" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." 
+msgstr "The service database object disappeared, Recreating it." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recovered model server connection!" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "model server went away" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Full set of FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "%(flag)s : FLAG SET " + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Got unknown keyword args to utils.execute: %r" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r failed. Retrying." 
+ +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "Environment not supported over SSH" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "process_input not supported over SSH" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "Specify a password or private_key" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "Error connecting via ssh: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "in looping call" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "Expected object of type: %s" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc: '%(name)s' took %(total_time).2f secs" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "Could not remove tmpdir: %s" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "Unknown byte multiplier: %s" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" 
+msgstr "Unable to find cert_file : %s" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to find ca_file : %s" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "Unable to find key_file : %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "Could not bind to %(host)s:%(port)s after trying for 30 seconds" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "Started %(name)s on %(_host)s:%(_port)s" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Stopping WSGI server." + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "WSGI server has stopped." + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "You must implement __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "limit param must be an integer" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "limit param must be positive" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "offset param must be an integer" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "offset param must be positive" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "marker [%s] not found" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s does not contain version" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "Initializing extension manager." + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "Loaded extension: %s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "Ext name: %s" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "Ext alias: %s" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "Ext description: %s" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "Ext namespace: %s" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "Ext updated: %s" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "Exception loading extension: %s" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "Loading extension %s" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "Calling extension factory %s" + +#: cinder/api/extensions.py:278 +#, python-format +msgid 
"osapi_volume_extension is set to deprecated path: %s" +msgstr "osapi_volume_extension is set to deprecated path: %s" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Failed to load extension %(ext_factory)s: %(exc)s" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "Failed to load extension %(classpath)s: %(exc)s" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Failed to load extension %(ext_name)s: %(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "element is not a child" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "root element selecting a list" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "subclasses must implement construct()!" 
+ +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "Updating %(resource)s '%(id)s' with '%(update)r'" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "show called for member %s" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "delete called for member %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "Delete backup with id: %s" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "Creating new backup %s" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "Incorrect request body format" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "Creating backup of volume %(volume_id)s in container %(container)s" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "Restoring backup %(backup_id)s (%(body)s)" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "Restoring backup %(backup_id)s to volume %(volume_id)s" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "Snapshot not found." + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "cannot understand XML" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "Host '%s' could not be found." 
+ +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "Invalid status: '%s'" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "Invalid update setting: '%s'" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "Setting host %(host)s to %(state)s." + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource is admin only functionality" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "Host not found" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "Quota limit must be -1 or greater." + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "Request body empty" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "Request body and URI mismatch" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "Request body contains too many items" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "Invalid request body" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "No image_name was specified in request." 
+ +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Caught error: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s returned with HTTP %(status)d" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." +msgstr "Request is too large." + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "Must specify an ExtensionManager class" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "Extended resource: %s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Extension %(ext_name)s extending resource: %(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "cannot understand JSON" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "too many body keys" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "Exception handling resource: %s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "Fault thrown: %s" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP exception thrown: %s" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "Unrecognized Content-Type provided in request" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "No Content-Type provided in request" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "Empty body provided in request" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "There is no such action: %s" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "Malformed request body" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "Unsupported Content-Type" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "Malformed request url" + +#: 
cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s returned a fault: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "This request was rate-limited." + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "snapshot does not exist" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "Metadata item was not found" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "Delete snapshot with id: %s" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "Create snapshot from volume %s" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value 
'%s' for force. " +msgstr "Invalid value '%s' for force. " + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "volume does not exist" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "Delete volume with id: %s" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "Invalid imageRef provided." + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume of %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Removing options '%(bad_options)s' from query" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "Removing options '%s' from query" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "Backup status must be available or error" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "Volume to be backed up must be available" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "Backup status must be available" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "Backup to be restored has invalid size" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "Creating volume of %(size)s GB for restore of backup %(backup_id)s" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "Volume to be restored to must be available" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "Volume to be restored to is smaller than the backup to be restored" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "Cleaning up incomplete backup operations" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "Resetting volume %s to available (was backing-up)" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "Resetting volume %s to error_restoring (was restoring-backup)" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "Resetting backup %s to error (was creating)" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "Resetting backup %s to available (was restoring)" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "Resuming delete on backup: %s" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" +"create_backup aborted, 
expected volume status %(expected_status)s but got" +" %(actual_status)s" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "create_backup finished. backup: %s" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" + +#: 
cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "delete_backup started, backup: %s" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "delete_backup finished, backup %s deleted" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "unsupported compression algorithm: %s" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "_check_container_exists: container: %s" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "container %s does not exist" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "container %s exists" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "_create_container started, container: %(container)s,backup: %(backup_id)s" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "_generate_swift_object_name_prefix: %s" + +#: cinder/backup/services/swift.py:159 +#, 
python-format +msgid "generated object list: %s" +msgstr "generated object list: %s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "_write_metadata finished" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "_read_metadata finished (%s)" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "volume size %d is invalid." 
+ +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "reading chunk of data from volume" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "not compressing data" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "About to put_object" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "swift MD5 for %(object_name)s: %(etag)s" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "backup MD5 for %(object_name)s: %(md5)s" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "Calling eventlet.sleep(0)" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "backup %s finished." 
+ +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "v1 swift volume backup restore of %s started" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "metadata_object_names = %s" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "decompressing data using %s algorithm" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "v1 swift volume backup restore of %s finished" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "Restoring swift backup version %s" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "No support to restore swift backup version %s" + +#: cinder/backup/services/swift.py:348 +#, 
python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "restore %(backup_id)s to %(volume_id)s finished." + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "swift error while listing objects, continuing with delete" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "swift error while deleting object %s, continuing with delete" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "deleted swift object: %(swift_object_name)s in container: %(container)s" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "delete %s finished" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "Creating iscsi_target for: %s" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "Failed to create iscsi target for volume id:%(vol_id)s." + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "Removing iscsi_target for: %s" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Failed to remove iscsi target for volume id:%(vol_id)s." 
+ +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "valid iqn needed for show_target" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "Removing iscsi_target for volume: %s" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "rtstool is not installed correctly" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "Creating iscsi_target for volume: %s" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "Removing iscsi_target: %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "Failed to add initiator iqn %s to target" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Unable to find group: %(group)s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Fail to create volume %(volname)s" + +#: cinder/brick/local_dev/lvm.py:75 +#, fuzzy +msgid "Error creating Volume Group" +msgstr "error refreshing volume stats" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, fuzzy, python-format +msgid "StdOut :%s" +msgstr "casted to %s" + +#: cinder/brick/local_dev/lvm.py:78 +#, fuzzy, python-format +msgid "StdErr :%s" +msgstr "casted to %s" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Fail to create volume %(volname)s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to find ca_file : %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " 
+"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Unable to find ca_file : %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "Id not in sort_keys; is sort_keys unique?" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "Unknown sort direction, must be 'desc' or 'asc'" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "Unrecognized read_deleted value '%s'" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "Change will make usage less than 0 for the following resources: %(unders)s" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "No backend config with id %(sm_backend_id)s" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "No sm_flavor called %(sm_flavor)s" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "No sm_volume with id %(volume_id)s" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "No backup with id %(backup_id)s" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "version should be an integer" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "Upgrade DB using Essex release first." 
+ +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "Got mysql server has gone away: %s" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "SQL connection failed. %s attempts left." + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "Exception while creating table." + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "Downgrade from initial Cinder install is unsupported." + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "Table |%s| not created!" 
+ +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "quota_classes table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "quota_usages table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "reservations table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "Exception while creating table 'volume_glance_metedata'" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "volume_glance_metadata table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "backups table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "snapshot_metadata table not dropped" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "Snapshot list encountered but no header found!" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "'qemu-img info' parsing failed." 
+ +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "fmt=%(fmt)s backed by:%(backing_file)s" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "Converted to raw, but format is now %s" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "Converted to %(f1)s, but format is now %(f2)s" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "Uncaught exception" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Original exception being dropped: %s" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "Could not release the acquired lock `%s`" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+ +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "Deprecated: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "Error loading logging config %(log_config)s: %(err_msg)s" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "syslog facility must be one of: %s" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Fatal call to deprecated config: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "task run outlasted interval by %s sec" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "in fixed duration looping call" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "Dynamic looping call sleeping for %.02f seconds" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "in dynamic looping call" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+ +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "Failed to understand rule %(match)r" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "Inheritance-based rules are deprecated; update _check_%s" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "No handler for matches of kind %s" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "Full set of CONF:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s not in valid priorities" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "Failed to load notifier %s. These notifications will not be sent." + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "Could not send notification to %(topic)s. Payload=%(message)s" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "Pool creating new connection" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "no calling threads waiting for msg_id : %s, message : %s" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "unpacked context: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "UNIQUE_ID is %s." 
+ +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "received %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "no method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "No method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "Expected exception during message handling (%s)" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "Exception during message handling" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "Making synchronous call on %s ..." + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "Making asynchronous cast on %s..." + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "Making asynchronous fanout cast..." + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "Sending %(event_type)s on %(topic)s" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "An unknown RPC related exception occurred." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." 
+ +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "Found duplicate message(%(msg_id)s). Skipping it." + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Invalid reuse of an RPC connection." + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "Specified RPC version, %(version)s, not supported by this endpoint." + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "Failed to sanitize %(item)s. Key error %(err)s" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." +msgstr "Failed to process message... skipping it." 
+ +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnecting to AMQP server on %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connected to AMQP server on %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
+ +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Failed to declare consumer for topic '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "Timed out waiting for RPC response: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Failed to consume message from queue: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Failed to publish message to topic '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connected to AMQP server on %s" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "Re-established AMQP queues" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "Error processing message. Skipping it." + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "JSON serialization failed." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "Deserializing: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "Connecting to %(addr)s with %(type)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "-> Subscribed to %(subscribe)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "-> bind: %(bind)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "Could not open socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "Subscribing to %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "You cannot recv on this socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "You cannot send on this socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "Running func with context: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "Sending reply" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "RPC message did not include method." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "Registering reactor" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "In reactor registered" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "Out reactor registered" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "Consuming socket" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "CONSUMER GOT %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creating proxy for topic: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "Topic contained dangerous characters." + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "ROUTER RELAY-OUT SUCCEEDED %(data)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "Topic socket file creation failed." + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "ROUTER RELAY-OUT QUEUED %(data)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "Could not create IPC directory %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "Could not create ZeroMQ receiver daemon. Socket may already be in use." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "CONSUMER RECEIVED DATA: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "ROUTER RELAY-OUT %(data)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope version unsupported or unknown." + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "Skipping topic registration. Already registered." + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "Consumer is a zmq.%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "Creating payload" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "Creating queue socket for reply waiter" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "Sending cast" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "Cast sent; Waiting reply" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "Received message: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "Unpacking response" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "Unsupported or unknown ZMQ envelope returned." + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "RPC Message Invalid." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "%(msg)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "Sending message(s) to: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "No matchmaker results. Not casting." + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "No match from matchmaker." + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "Match not found by MatchMaker." + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "Matchmaker does not implement registration or heartbeat." + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "Matchmaker unregistered: %s, %s" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "Register before starting heartbeat." + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "No key defining hosts for topic '%s', see ringfile" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "Is the appropriate service running?" 
+ +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "Could not find another host" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "Must implement schedule_create_volume" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "Invalid value for 'scheduler_max_attempts', must be >=1" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "Filtered %(hosts)s" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "Choosing %(best_host)s" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "Ignoring %(service_name)s service update from %(host)s" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "Received %(service_name)s service update from %(host)s." + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "service is down or disabled." 
+ +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "Failed to schedule_%(method)s: %(ex)s" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "Could not stat scheduler options file %(filename)s: '%(e)s'" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "Could not decode scheduler options: '%(e)s'" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "Not enough allocatable volume gigabytes remaining" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "Free capacity not set: volume node info collection broken." + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "Faking execution of cmd (subprocess): %s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "Faked command matched %s" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "Faked command raised an exception %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" +"The following migrations are missing a downgrade:\n" +"\t%s" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "unrecognized argument %s" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "Run CLI command: %s" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "Given data: %s" + +#: cinder/tests/test_volume_types.py:59 +#, 
python-format +msgid "Result data: %s" +msgstr "Result data: %s" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "Invalid input" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "volume: %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "Authentication error" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "Authorization error" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "Item not found" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "Doing %(method)s on %(relative_url)s" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "Body: %s" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "Unexpected status code" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "Decoding JSON: %s" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "May specify only one of snapshot, imageRef or source volume" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "status must be available" + +#: cinder/volume/api.py:108 +msgid "Volume size 
cannot be lesser than the Snapshot size" +msgstr "Volume size cannot be lesser than the Snapshot size" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "Unable to clone volumes that are in an error state" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "Clones currently must be >= original volume size." + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "Volume size '%s' must be an integer and greater than 0" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "Size of specified image is larger than volume size." + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "Image minDisk size is larger than the volume size." + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "Failed to update quota for deleting volume" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "Volume status must be available or error" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "Volume still has %d dependent snapshots" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, 
python-format +msgid "Searching by: %s" +msgstr "Searching by: %s" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "already attached" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "already detached" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available to reserve" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "must be available" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "Volume Snapshot status must be available or error" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "Metadata property key blank" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "Metadata property key greater than 255 characters" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "Metadata property value greater than 255 characters" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available/in-use." + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "Volume status is in-use." + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "Recovering from a failed execute. Try number %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "ISCSI provider_location not stored, using discovery" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "ISCSI Discovery: Found %s" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "copy_image_to_volume %s." + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "copy_volume_to_image %s." + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "iSCSI device not found at %s" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "Found iSCSI node %(host_device)s (after %(tries)s rescans)" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "Updating volume status" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "Driver must implement initialize_connection" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "Driver path %s is deprecated, update your configuration to the new path." 
+ +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "volume %s stuck in a downloading state" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "Resuming any in progress delete operations" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "Resuming delete on volume: %s" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creating from snapshot" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creating from existing volume" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creating from image" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "volume %s: create failed" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "Error: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "volume %s: Error 
trying to reschedule create" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "Retry info not present, will not reschedule" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "No request spec, will not reschedule" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "volume is not local to this node" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "volume %s: volume is busy" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "Failed to update usages deleting volume" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "snapshot %s: creating" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "snapshot %(snap_name)s: creating" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "snapshot %s: created successfully" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "snapshot %s: deleting" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "snapshot %s: snapshot is busy" + +#: cinder/volume/manager.py:530 +msgid "Failed 
to update usages deleting snapshot" +msgstr "Failed to update usages deleting snapshot" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "snapshot %s: deleted successfully" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "being attached by another instance" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "Downloaded image %(image_id)s to %(volume_id)s successfully" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "Clear capabilities" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "Notification {%s} received" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "DB error: %s" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "id cannot be None" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "name cannot be None" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" +"Default volume type is not found, please check default_volume_type " +"config: %s" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "Running with CoraidDriver for ESM EtherCLoud" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "Update session cookie %(session)s" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "Message : 
%(message)s" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "Error while trying to set group: %(message)s" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "Unable to find group: %(group)s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "ESM urlOpen error" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "JSON Error" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "Request without URL" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "Configure data : %s" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "Configure response : %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "Unable to retrive volume infos for volume %(volname)s" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "Cannot login on Coraid ESM" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Fail to create volume %(volname)s" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Failed to delete volume %(volname)s" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Failed to Create Snapshot %(snapname)s" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "Failed to Delete Snapshot %(snapname)s" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "Failed to Create Volume from Snapshot %(snapname)s" + +#: cinder/volume/drivers/coraid.py:383 +#, 
python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "There's no Gluster config file configured (%s)" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "Gluster config file at %(config)s doesn't exist" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "mount.glusterfs is not installed" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "casted to %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "Volume %s does not have provider_location specified, skipping" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "Exception during mounting %s" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "%s is already mounted" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "Size for volume: %s not found, skipping secure delete." 
+ +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "Performing secure delete on volume: %s" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "Error unrecognized volume_clear option: %s" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "snapshot: %s not found, skipping delete operations" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "Creating clone of volume: %s" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "Skipping ensure_export. No iscsi_target provision for volume: %s" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "Skipping ensure_export. No iscsi_target provisioned for volume: %s" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "Detected inconsistency in provider_location id" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "Symbolic link %s not found" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "Skipping remove_export. No iscsi_target provisioned for volume: %s" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "Error retrieving volume status: " + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "There's no NFS config file configured (%s)" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "NFS config file at %(config)s doesn't exist" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd has no pool %s" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "error refreshing volume stats" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "Not stored in rbd" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "Blank components" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "Not an rbd snapshot" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "%s is in a different ceph cluster" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "Unable to read image %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "Value required for 'scality_sofs_config'" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "Cannot access 'scality_sofs_config': %s" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "Cannot execute /sbin/mount.sofs" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "Cannot mount Scality SOFS, check syslog for errors" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find 
volume dir for Scality SOFS at '%s'" +msgstr "Cannot find volume dir for Scality SOFS at '%s'" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog is not working" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "Payload for SolidFire API call: %s" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "Call to json.loads() raised an exception: %s" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "Results of SolidFire API call: %s" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "Clone operation encountered: %s" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "Waiting for outstanding operation before retrying snapshot: %s" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "Detected xDBVersionMismatch, retry %s of 5" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "API response: %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "Found solidfire account: %s" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "solidfire account: %s does not exist, create it..." + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" 
+ +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "Failed to get model update from clone" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "More than one valid preset was detected, using %s" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "Volume %s, not found on SF Cluster." + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "Found %(count)s volumes mapped to id: %(uuid)s." + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "Enter SolidFire delete_volume..." + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "Account for Volume ID %s was not found on the SolidFire Cluster!" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "This usually means the volume was never successfully created." + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "Volume ID %s was not found on the SolidFire Cluster!" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "Leaving SolidFire delete_volume" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "Executing SolidFire ensure_export..." + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "Executing SolidFire create_export..." 
+ +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "Updating cluster status info" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "Failed to get updated stats" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "WWPN on node %(node)s: %(wwpn)s" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "enter: do_setup" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "pool %s doesn't exist" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "Failed to get license information." + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "do_setup: No configured nodes" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "leave: do_setup" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "enter: check_for_setup_error" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "%s is not set" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " 
+"values are between 0 and 600" +msgstr "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "leave: check_for_setup_error" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "ensure_export: Volume %s not found on storage" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "enter: _get_chap_secret_for_host: host name %s" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "_create_host: Cannot clean host name. 
Host name is not unicode or string" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "enter: _get_host_from_connector: prefix %s" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "leave: _get_host_from_connector: host %s" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "enter: _create_host: host %s" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "_create_host: No connector ports" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "leave: _create_host: host %(host)s - %(host_name)s" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: creating from snapshot" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "enter: _delete_host: host %s " + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "leave: _delete_host: host %s " + +#: cinder/volume/drivers/storwize_svc.py:675 +#, 
python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "enter: initialize_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "_create_host failed to return the host name." + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "initialize_connection: Failed to get attributes for volume %s" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "Did not find expected column name in lsvdisk: %s" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "initialize_connection: Missing volume attribute for volume %s" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "initialize_connection: Did not find a preferred node for volume %s" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" 
properties: %(prop)s" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "enter: terminate_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "_get_host_from_connector failed to return the host name for connector" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "leave: terminate_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "protocol must be specified as ' iSCSI' or ' FC'" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "enter: _create_vdisk: vdisk %s " + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "leave: _create_vdisk: volume %s " + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr 
"" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+ +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "_prepare_fc_map: %s" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "_create_copy: Source vdisk %s does not exist" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: 
_create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "enter: _get_flashcopy_mapping_attributes: mapping %s" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "enter: _is_vdisk_defined: vdisk %s " + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "enter: _delete_vdisk: vdisk %s" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "warning: Tried to delete vdisk %s but it does not exist." + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "leave: _delete_vdisk: vdisk %s" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "create_volume_from_snapshot: Source and destination size differ." + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "create_cloned_volume: Source and destination size differ." + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "_update_volume_status: Could not get system name" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "Could not get pool data from the storage" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "_update_volume_status: Could not get storage pool data" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "System does not support compression" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "If compression is set to True, rsize must also be set (not equal to -1)" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" + 
+#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "enter: _execute_command_and_parse_attributes: command %s" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: 
cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "Did not find expected column in %(fun)s: %(hdr)s" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "Creating folder %s " + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "Ignored target creation error \"%s\" while ensuring export" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "Disk not found: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "Sending %(method)s to %(url)s. Body \"%(body)s\"" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "Operation completed. %(data)s" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Volume %(name)s could not be found. It might be already deleted" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "Attach properties: %(properties)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "Entering create_volume." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "Create Volume: %(volume)s Size: %(size)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "Create Volume: %(volume)s Storage type: %(storage_type)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "Create Volume: %(volumename)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "Leaving create_volume: %(volumename)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "Entering create_volume_from_snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. 
Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "Entering create_cloned_volume." + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "Entering delete_volume." + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "Delete Volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "Volume %(name)s not found on the array. No volume to delete." + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "Delete Volume: %(name)s DeviceID: %(deviceid)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "Entering create_snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Create snapshot: %(snapshot)s: volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "Cannot find Replication Service to create snapshot for volume %s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "Entering delete_snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Delete Snapshot: %(snapshot)s: volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "Create export: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "create_export: Volume: %(volume)s Device ID: %(device_id)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "Error mapping volume %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "ExposePaths for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "Error unmapping volume %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "HidePaths for volume %s completed successfully." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "Error mapping volume %(vol)s. %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "AddMembers for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "Error unmapping volume %(vol)s. %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "RemoveMembers for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "Map volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "Cannot find Controller Configuration Service for storage system %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "Unmap volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." 
+msgstr "Volume %s is not mapped. No volume to unmap." + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "Initialize connection: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "Volume %s is already mapped." + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "Terminate connection: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "Found Storage Type: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "Storage type not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "Found Masking View: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "Masking View not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "Ecom user not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "Ecom server not found." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "Cannot connect to ECOM server" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "Found Replication Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "Found Storage Configuration Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "Found Controller Configuration Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "Found Storage Hardware ID Management Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Pool %(storage_type)s is not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "Storage system not found for pool %(storage_type)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "Pool: %(pool)s SystemName: %(systemname)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "Pool name: %(poolname)s System name: %(systemname)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "Volume %(volumename)s not found on the array." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Volume name: %(volumename)s Volume instance: %(vol_instance)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "Source: %(volumename)s Target: %(snapshotname)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "Error finding %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "Found %(name)s: %(initiator)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "Available device number on %(storage)s: %(device)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "Device number not found for volume %(volumename)s %(vol_instance)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "Device info: %(data)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "Found Storage Processor System: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "Error finding Storage Hardware ID Service." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "Error finding Target WWNs." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "Add target WWN: %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "Target WWNs: %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "Cannot find device number for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "Found iSCSI endpoint: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "ISCSI properties: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "read timed out" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "do_setup." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "check_for_setup_error." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "check_for_setup_error: Can not get device type." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +#, fuzzy +msgid "_get_device_type: Storage Pool must be configured." +msgstr "_get_device_type: Storage Pool must beconfigured." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "create_volume:volume name: %s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "delete_volume: volume name: %s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, fuzzy, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "delete_volume:No need to delete volume.Volume %(name)s does not exist." 
+ +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "create_export: volume name:%s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "create_export:Volume %(name)s does not exist." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, fuzzy, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" +"initialize_connection:Failed to find target ipfor " +"initiator:%(initiatorname)s,please check config file." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, fuzzy, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "initialize_connection:Failed to find target iSCSIiqn. Target IP:%(ip)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, fuzzy, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" +"initialize_connection:host name: %(host)s,initiator name: %(ini)s, " +"hostport name: %(port)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." 
+ +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "terminate_connection:Host does not exist. Host name:%(host)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, fuzzy, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "terminate_connection:volume does not exist.volume name:%(volume)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, fuzzy, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s,volume name:%(volumename)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "create_snapshot:Device does not support snapshot." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "create_snapshot:Resource pool needs 1GB valid size at least." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, fuzzy, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "create_snapshot:Volume does not exist.Volume name:%(name)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, fuzzy, python-format +msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +msgstr "create_snapshot:Snapshot does not exist.Snapshot name:%(name)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "delete_snapshot:Device does not support snapshot." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, fuzzy, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "create_volume_from_snapshot:Snapshot does not exist.Snapshot name:%(name)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "Config file is wrong. Controler IP, UserName and UserPassword must be set." 
+ +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "_check_conf_file: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "_read_xml:%s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, fuzzy, python-format +msgid "Write login information to xml error. %s" +msgstr "Write login informationto xml error. %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "_get_login_info error. %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, fuzzy, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "_get_lun_set_info:%s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +#, fuzzy +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" +"_get_maximum_pool:maxpoolid is None.Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+ +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "_get_iscsi_info:%s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "CLI command:%s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "_execute_cli:%s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, fuzzy, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" +"_add_host:Failed to add host to hostgroup.host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, fuzzy, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "_add_initiator:Failed to add initiator.initiator name:%(name)s out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, fuzzy, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" +"_delete_initiator:ERROE:Failed to delete initiator.initiator " +"name:%(name)s out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, fuzzy, python-format +msgid "" +"_add_hostport:Failed to add hostport. 
port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" +"_add_hostport:Failed to add hostport. port name:%(port)sport " +"information:%(info)s host id:%(host)sout:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "_delete_hostport:Failed to delete host port. port id:%(portid)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "_get_tgt_iqn:iSCSI IP is %s." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "_get_tgt_iqn:iSCSI target iqn is:%s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, fuzzy, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" +"_map_lun:Failed to add " +"hostmap.hostid:%(host)slunid:%(lun)shostlunid:%(hostlunid)s.out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, fuzzy, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "_delete_host: Failed delete host.host id:%(hostid)s.out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. 
" +"out:%(out)s" +msgstr "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, fuzzy, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" +"_wait_for_luncopy:LUNcopy status isnot normal. 
LUNcopy " +"name:%(luncopyname)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, fuzzy, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" +"_change_lun_controller:Failed to change lun owningcontroller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +#, fuzzy +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "_is_resource_pool_enough:Resource pool for snapshotnot be added." 
+ +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "API %(name)s failed: %(reason)s" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "Using WSDL: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "Using DFM server: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "Using storage service: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "Using storage service prefix: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "Using vfiler: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "Either netapp_storage_service or netapp_storage_service_prefix must be set" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "Connected to DFM server" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "Job failed: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "Failed to provision dataset member" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "No LUN was created by the provision job" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "No entry in LUN table for volume %(name)s." + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "Failed to remove and delete dataset LUN member" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "Failed to remove and delete dataset Qtree member" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "No entry in LUN table for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Failed to get LUN details for LUN ID %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Failed to get host details for host ID %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "No LUN ID for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Failed to get target portal for filer: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Failed to get target IQN for filer: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" 
+"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "No metadata property %(prop)s defined for the LUN %(name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "Success getting LUN list from server" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "Created LUN with name %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: 
cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "Destroyed LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "Mapped LUN %(handle)s to the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Failed to get LUN target details for the LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Failed to get target portal for the LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Failed to get target IQN for the LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "No entry in LUN table for snapshot %(name)s." + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "Object is not a NetApp LUN." 
+ +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "Cloned LUN with new name %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Could not find handle for LUN named %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "Using NetApp filer: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "No entry in LUN table for volume/snapshot %(name)s." + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "Mapped LUN %(name)s to the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "Snapshot %s deletion successful" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "Unmapped LUN %(name)s from the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Failed to get vol with required size for volume: 
%s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "Error mapping lun. Code :%(code)s, Message:%(message)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "Error unmapping lun. Code :%(code)s, Message:%(message)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "Could not find attribute for LUN named %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "No iscsi service found for vserver %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "No clonned lun named %s found on the filer" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." +msgstr "Error finding luns for volume %(vol)s. Verify volume exists." 
+ +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "Clone operation with src %(name)s and dest %(new_name)s completed" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "Clone operation with src %(name)s and dest %(new_name)s failed" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "No interface found on cluster for ip %s" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "No storage path found for export path %s" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "Cloning with src %(src_path)s, dest %(dest_path)s" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "Nexenta SA returned the error" + +#: 
cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "Sending JSON data: %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "Auto switching to HTTPS connection to %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "No headers in server response" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "Bad response from server" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "Got response: %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "Volume %s does not exist in Nexenta SA" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "Ignored target group creation error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "Ignored target group member addition error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "Ignored LU creation error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "Ignored LUN mapping entry addition error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" + +#: 
cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "CLIQ command returned %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "Volume info: %(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "local_path not supported" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "Error running SSH command: %s" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "Specify san_password or san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip must be set" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "Cannot parse list-view output: %s" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "LUID not found for %(zfs_poolname)s. Output=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "Error running ssh command: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "CPG (%s) doesn't exist on array" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." 
+ +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "Volume (%s) already exists on array" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "Unexpected state while cloning %s" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "Login to 3PAR array invalid" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "The hostname must be called '%s'" + diff --git a/cinder/locale/es/LC_MESSAGES/cinder.po b/cinder/locale/es/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..69c0cb128b --- /dev/null +++ b/cinder/locale/es/LC_MESSAGES/cinder.po @@ -0,0 +1,5614 @@ +# Spanish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-03-10 06:08+0000\n" +"Last-Translator: Oscar Rosario \n" +"Language-Team: Spanish \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de salida: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Coneccion con glance fallida" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "No Autorizado" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "El usuario no tiene privilegios de administrador" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." 
+msgstr "no hay método para el mensaje: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Parametros inaceptables" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Captura no valida" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Fallo al ingresar informacion en formato json" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "La petición es inválida." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "La petición es inválida." + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "Entrada invalida recibida" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Tipo de contenido invalido %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "El servicio no esta disponible en este momento" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "el grupo %s ya existe" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." 
+msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" +"No es posible crear el VDI en SR %(sr_ref)s para la instancia " +"%(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "backend inválido: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Coneccion con glance fallida" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Excepción interna: %s" + +#: cinder/service.py:243 
cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "configurando la red del host" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "No encontrado: %s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Se detuvo un servicio sin entrada en la base de datos" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recuperada la conexión al servidor de modelos." 
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "el servidor de modelos se ha ido" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de opciones (FLAGS):" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Obteniendo %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Ejecutando cmd (subprocesos): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "El resultado fue %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "corriendo cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Conectando a libvirt: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "Depuración de la devolución de llamada: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "No se encuentra la dirección del enlace local.:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "backend inválido: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + 
+#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Cambio de clave secreta para el usuario %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado error: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instancia %s: creando snapshot" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "el grupo de volúmenes %s no existe" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Crear volumen de %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "el grupo de volúmenes %s no existe" + +#: 
cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La petición es inválida." 
+ +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Reiniciando instancia %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Imposible desasociar volumen %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: 
cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "El uso de una petición de contexto vacía está en desuso" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Obtenida excepción %s" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "recibido %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Conjunto completo de opciones (FLAGS):" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "contenido desempaquetado: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "recibido %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "no hay método para el mensaje: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "No hay método para el mensaje: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID es %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Reuso invalido de una coneccion RPC" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "contenido desempaquetado: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Des-registrando la imagen %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "recibido %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La petición es inválida." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake no tiene una implementación para %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "Debe de implementar un horario de reserva" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Debe de implementar un horario de reserva" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "Falso ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 
+#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Captura no valida" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Desasociar volumen %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: 
cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: 
cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "El estado del volumen debe estar disponible" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recuperandose de una ejecución fallida. Intenta el número %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Exportando de nuevo los volumenes %s" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: saltando exportación" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volumen %s: exportando" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volumen %s: exportando" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volumen %s: creando" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volumen %s: creando" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volumen %s: exportando" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volumen %s: creando" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volumen %s: creado satisfactoriamente" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Capturado error: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" 
+msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volumen %s: eliminando" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volumen no local a este nodo" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volumen %s: eliminando exportación" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instancia %s: creando snapshot" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: 
cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Fallo a reinicia la instancia" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "el grupo %s ya existe" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format 
+msgid "volume group %s doesn't exist" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Crear volumen de %s GB" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Captura no valida" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Fallo al suspender la instancia" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: saltando exportación" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de salida: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Borrando usuario %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring 
export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "No se encuentra la dirección del enlace local.:%s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Exportando de nuevo los volumenes %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Desasociar volumen %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconectado a la cola" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "No encontrado: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Ejecutando instancias: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "No encontrado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconectado a la cola" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "No encontrado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get 
LUN target details for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Ejecutando instancias: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, 
python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "el grupo %s ya existe" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/fi_FI/LC_MESSAGES/cinder.po b/cinder/locale/fi_FI/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..87cea00182 --- /dev/null +++ b/cinder/locale/fi_FI/LC_MESSAGES/cinder.po @@ -0,0 +1,5574 @@ +# Finnish (Finland) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Finnish (Finland) " +"(http://www.transifex.com/projects/p/openstack/language/fi_FI/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/fr/LC_MESSAGES/cinder.po b/cinder/locale/fr/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..2a22497847 --- /dev/null +++ b/cinder/locale/fr/LC_MESSAGES/cinder.po @@ -0,0 +1,5620 @@ +# French translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-04-06 14:54+0000\n" +"Last-Translator: EmmanuelLeNormand \n" +"Language-Team: French \n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Commande : %(cmd)s\n" +"Valeur retournée : %(exit_code)s\n" +"Sortie standard : %(stdout)r\n" +"Sortie d'erreur : %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "Une exception inconnue s'est produite." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "La connexion à Glance a échoué" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Non autorisé." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "L’utilisateur n'a pas les privilèges administrateur" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Le réglage des droits n'autorise pas %(action)s à être effectué(e)(s)" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Pas de méthode pour le message : %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Paramètres inacceptables." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Snapshot invalide" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Le volume %(volume_id)s n'est lié à rien" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Échec du chargement des données au format JSON" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "La requête est invalide." 
+ +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "La requête est invalide." + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "Entrée invalide reçue" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Type de volume invalide" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Volume invalide" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Le type de contenu %(content_type)s est invalide" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "Le service est indisponible actuellement." + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Tentative de suppression d'une console non existente %(console_id)s." + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Tentative de suppression d'une console non existente %(console_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." 
+msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." 
+msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "le groupe %s existe déjà" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" +"Impossible de créer VDI sur SR %(sr_ref)s pour l'instance " +"%(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Une exception inconnue s'est produite." 
+ +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Une exception inconnue s'est produite." + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Backend invalide : %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "La connexion à Glance a échoué" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Exception interne : %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "adresse de départ" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: 
cinder/service.py:310 cinder/openstack/common/service.py:293
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr ""
+
+#: cinder/service.py:321 cinder/openstack/common/service.py:304
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr ""
+
+#: cinder/service.py:353
+#, fuzzy, python-format
+msgid "Starting %(topic)s node (version %(version_string)s)"
+msgstr "Démarrage du noeud %(topic)s (version %(vcs_string)s)"
+
+#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47
+#, python-format
+msgid "Creating Consumer connection for Service %s"
+msgstr ""
+
+#: cinder/service.py:458
+msgid "Service killed that has no database entry"
+msgstr "Service détruit sans entrée dans la base de données"
+
+#: cinder/service.py:495
+msgid "The service database object disappeared, Recreating it."
+msgstr "L'objet du service de base de données a disparu, re-création en cours."
+
+#: cinder/service.py:510
+msgid "Recovered model server connection!"
+msgstr "Récupération du modèle de connexion serveur terminée!"
+
+#: cinder/service.py:516
+msgid "model server went away"
+msgstr "Le modèle de serveur a disparu"
+
+#: cinder/service.py:608
+msgid "Full set of FLAGS:"
+msgstr "Ensemble de propriétés complet :"
+
+#: cinder/service.py:615
+#, python-format
+msgid "%(flag)s : FLAG SET "
+msgstr ""
+
+#: cinder/utils.py:96
+#, python-format
+msgid "Fetching %s"
+msgstr "Récupération de %s"
+
+#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122
+#, python-format
+msgid "Got unknown keyword args to utils.execute: %r"
+msgstr ""
+
+#: cinder/utils.py:153
+msgid ""
+"The root_helper option (which lets you specify a root wrapper different "
+"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You "
+"should use the rootwrap_config option instead."
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Execution de la commande (sous-processus) : %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Le résultat était %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Execution de la cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Connexion à libvirt: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "Debug dans le rappel : %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "L'adresse du lien local n'a pas été trouvé :%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Impossible de trouver l'IP du lien local de %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend invalide : %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format 
+msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "Vous devez implémenter __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 
+#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Clef secrète changée pour l'utilisateur %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Erreur interceptée : %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Création d'un volume de %s Go" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: 
cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La requête est invalide." 
+ +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Tentative de suppression d'une console non existente %(console_id)s." + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Redémarrage de l'instance %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Impossible de détacher le volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format 
+msgid "Unable to find VG: %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "L'utilisation d'une requête de contexte vide est dévalué" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Reçu exception : %s" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "%s reçu" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Ensemble de propriétés complet :" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "Contexte décompacté : %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "%s reçu" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID est %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Une exception inconnue s'est produite." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Réutilisation invalide d'une connexion RPC" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Renvoi de l'exception %s à l'appelant" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "Contexte décompacté : %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Dé-enregitrement de l'image %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "%s reçu" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La requête est invalide." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake n'a pas d'implémentation pour %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "Doit mettre en oeuvre un calendrier de retrait" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Doit mettre en oeuvre un calendrier de retrait" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAUX ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, 
python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Snapshot invalide" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Dé-montage du volume %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 
cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format 
+msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Récupération après une exécution erronée. Tentative numéro %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Ré-exportation de %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s : exportation évitée" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: cŕeation d'un volume logique de %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: exportation en cours" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: exportation en cours" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: création" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: création" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: exportation en cours" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: création" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: crée avec succès" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Erreur interceptée : %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, 
will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: suppression" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Le volume n'est pas local à ce noeud" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: suppression de l'exportation" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s 
successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "réponse %s" + +#: 
cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Échec du redémarrage de l'instance" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. 
Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "Nested renvoi %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "le groupe %s existe déjà" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Création d'un volume de %s Go" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:364
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:378
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:430
+#, python-format
+msgid "Symbolic link %s not found"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:542
+#, python-format
+msgid ""
+"Skipping remove_export. No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:583
+msgid "Error retrieving volume status: "
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:141
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:146
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:64
+#, python-format
+msgid "rbd has no pool %s"
+msgstr "rbd n'a pas de pool %s"
+
+#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134
+msgid "error refreshing volume stats"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:220
+msgid "Not stored in rbd"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:224
+msgid "Blank components"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:227
+#, fuzzy
+msgid "Not an rbd snapshot"
+msgstr "Snapshot invalide"
+
+#: cinder/volume/drivers/rbd.py:242
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:253
+#, fuzzy, python-format
+msgid "Unable to read image %s"
+msgstr "Impossible de trouver le volume %s"
+
+#: cinder/volume/drivers/scality.py:63
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:74
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog n'est pas actif : %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog n'est pas actif" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "réponse %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" 
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:338
+#, fuzzy
+msgid "Failed to get model update from clone"
+msgstr "Impossible de récupérer les méta-données pour l'IP : %s"
+
+#: cinder/volume/drivers/solidfire.py:364
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:397
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:406
+#, python-format
+msgid "Volume %s, not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:409
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:476
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:480
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:482
+msgid "This usually means the volume was never succesfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:497
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:500
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:504
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:509
+msgid "Executing SolidFire create_export..."
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Échec de la suspension de l'instance" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on 
storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s : exportation évitée" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s 
with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Commande : %(cmd)s\n" +"Valeur retournée : %(exit_code)s\n" +"Sortie standard : %(stdout)r\n" +"Sortie d'erreur : %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Suppression de l'utilisateur %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target 
creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/windows.py:213
+#, fuzzy, python-format
+msgid "Disk not found: %s"
+msgstr "Sheepdog n'est pas actif : %s"
+
+#: cinder/volume/drivers/zadara.py:218
+#, python-format
+msgid "Sending %(method)s to %(url)s. Body \"%(body)s\""
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:240
+#, python-format
+msgid "Operation completed. %(data)s"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:350
+#, python-format
+msgid "Volume %(name)s could not be found. It might be already deleted"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:447
+#, python-format
+msgid "Attach properties: %(properties)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:42
+msgid ""
+"Module PyWBEM not installed. Install PyWBEM using the python-pywbem "
+"package."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:78
+msgid "Entering create_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:82
+#, fuzzy, python-format
+msgid "Create Volume: %(volume)s Size: %(size)lu"
+msgstr "volume %(vol_name)s: création d'un volume logique de %(vol_size)sG"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:90
+#, python-format
+msgid "Create Volume: %(volume)s Storage type: %(storage_type)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:97
+#, python-format
+msgid ""
+"Create Volume: %(volume)s Pool: %(pool)s Storage System: "
+"%(storage_system)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:106
+#, python-format
+msgid ""
+"Error Create Volume: %(volumename)s. Storage Configuration Service not "
+"found for pool %(storage_type)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Ré-exportation de %s volumes" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Erreur au démarrage xvp : %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconnection à la queue" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Instance actives : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconnection à la queue" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "Nested renvoi %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format 
+msgid "Failed to get LUN target details for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Instance actives : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "réponse %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, 
python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Erreur imprévue lors de l'exécution de la commande" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Erreur imprévue lors de l'exécution de la commande" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid."
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "le groupe %s existe déjà" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Erreur imprévue lors de l'exécution de la commande" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/it/LC_MESSAGES/cinder.po b/cinder/locale/it/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..f65735c064 --- /dev/null +++ b/cinder/locale/it/LC_MESSAGES/cinder.po @@ -0,0 +1,5605 @@ +# Italian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-04-01 18:59+0000\n" +"Last-Translator: simone.sandri \n" +"Language-Team: Italian \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando."
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "L'utente non ha i privilegi dell'amministratore" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "nessun metodo per il messaggio: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Parametri inaccettabili." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "La richiesta non è valida." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "La richiesta non è valida." 
+ +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "E' stato ricevuto un input non valido" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Tipo del volume non valido" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Volume non valido" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." 
+msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." 
+msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Impossibile creare il VDI su SR %(sr_ref)s per l'istanza %(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found."
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Eccezione interna: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, 
python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Servizio terminato che non ha entry nel database" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "Il servizio è scomparso dal database, ricreo." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Connessione al model server ripristinata!" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "model server è scomparso" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Insieme di FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Prelievo %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead."
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Esecuzione del comando (sottoprocesso): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Il risultato è %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Eseguendo cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213
+#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 
+#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, 
python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La richiesta non è valida." 
+ +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Riavviando l'istanza %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Impossibile smontare il volume %s" + +#: 
cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "ricevuto %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Insieme di FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "contesto decompresso: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "ricevuto %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "nessun metodo per il messaggio: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "nessun metodo per il messaggio: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID è %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Sollevando eccezione %s al chiamante" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "contesto decompresso: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "ricevuto %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La richiesta non è valida." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "E' stato ricevuto un input non valido" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "volume %s: rimuovendo" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: 
cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: 
cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creazione in corso per l'esportazione" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creazione in corso per l'esportazione" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creazione in corso" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creazione in corso" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creazione in corso per l'esportazione" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: creazione in corso" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: creato con successo" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 
+msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: rimuovendo" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume ancora collegato" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image 
(%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Impossibile riavviare l'istanza" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, 
python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for 
SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Impossibile sospendere l'istanza" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: 
cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." 
+msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Riconnesso alla coda" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Riconnesso alla coda" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, 
python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ja/LC_MESSAGES/cinder.po b/cinder/locale/ja/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..e5c816d40e --- /dev/null +++ b/cinder/locale/ja/LC_MESSAGES/cinder.po @@ -0,0 +1,5606 @@ +# Japanese translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-08-23 11:22+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"コマンド: %(cmd)s\n" +"終了コード: %(exit_code)s\n" +"標準出力: %(stdout)r\n" +"標準エラー出力: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." 
+msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "グループ %s は既に存在しています。" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "インスタンス %(instance_name)s 用のSR %(sr_ref)s における VDI を作成できません" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "不正なバックエンドです: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "libvirtへの接続が切れています。" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "内側で発生した例外: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "開始アドレス" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "NotFound 発生: %s" + +#: cinder/service.py:310 
cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "%(topic)s ノードを開始しています (バージョン %(vcs_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "データベースにエントリの存在しないサービスを終了します。" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "モデルサーバへの接続を復旧しました。" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "モデルサーバが消滅しました。" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "FLAGSの一覧:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "ファイルをフェッチ: %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "コマンド実行(subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "コマンド実行結果: %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "コマンド(SSH)を実行: %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "libvirt %s へ接続します。" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "コールバック中のデバッグ: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "リンクローカルアドレスが見つかりません: %s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "%(interface)s のリンクローカルIPアドレスが取得できません:%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "不正なバックエンドです: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "バックエンドは %s です。" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#,
python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "__call__ を実装しなければなりません" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 
+#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "エラー %s をキャッチしました。" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: 
cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Rebooting instance: インスタンス %s を再起動します。" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "ボリューム %s を切断(detach)できません" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "VBD %s から 
SRを取得できません。" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Request context を空とすることは非推奨です。" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "例外 %s が発生しました。" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "受信: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "FLAGSの一覧:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "context %s をアンパックしました。" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "受信: %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_IDは %s です。" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "呼び出し元に 例外 %s を返却します。" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "context %s をアンパックしました。" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "De-registering image: イメージ %s を登録解除します。" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "受信: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake には %s が実装されていません。" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "偽のISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, 
python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Detach volume: ボリューム %s をデタッチします" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 
cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format 
+msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "実行失敗からリカバリーします。%s 回目のトライ。" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "ボリューム %s のエキスポートをスキップします。" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "ボリューム %s をエクスポートします。" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "ボリューム %s をエクスポートします。" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "ボリューム%sを作成します。" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "ボリューム%sを作成します。" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "ボリューム %s をエクスポートします。" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "ボリューム%sを作成します。" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "ボリューム %s の作成に成功しました。" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "エラー %s をキャッチしました。" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, 
will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "ボリューム %s を削除します。" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "ボリュームはこのノードのローカルではありません。" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "ボリューム %s のエクスポートを解除します。" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) 
successfully" +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "応答 %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "インスタンス終了処理を開始します。" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "ネストした戻り値: %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "グループ %s は既に存在しています。" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: 
cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd にプール %s がありません。" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog が動作していません: %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog が機能していません" + +#: 
cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "応答 %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "インスタンス終了処理を開始します。" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "ボリューム %s のエキスポートをスキップします。" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"コマンド: %(cmd)s\n" +"終了コード: %(exit_code)s\n" +"標準出力: %(stdout)r\n" +"標準エラー出力: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Deleting user: ユーザ %s を削除します。" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" 
+msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Sheepdog が動作していません: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "xvp の開始中にエラー: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "キューに再接続しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "NotFound 発生: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. 
Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "インスタンス %s は実行中です。" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "NotFound 発生: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "キューに再接続しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound 発生: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "ネストした戻り値: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format 
+msgid "Failed to get LUN target details for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "インスタンス %s は実行中です。" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "応答 %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, 
python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "グループ %s は既に存在しています。" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ko/LC_MESSAGES/cinder.po b/cinder/locale/ko/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..6731edc57f --- /dev/null +++ b/cinder/locale/ko/LC_MESSAGES/cinder.po @@ -0,0 +1,5593 @@ +# Korean translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-12-16 04:42+0000\n" +"Last-Translator: Zhongyue Luo \n" +"Language-Team: Korean \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." 
+msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "%(instance_name)s 인스턴스의 %(sr_ref)s SR에 대한 VDI 생성이 실패했습니다" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, 
python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, 
python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "인스턴스 %s를 재부팅합니다" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, 
however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG 
already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" 
+msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid 
"Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: 
cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: 
cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..c1f7387564 --- /dev/null +++ b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po @@ -0,0 +1,5574 @@ +# Korean (South Korea) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Korean (Korea) " +"(http://www.transifex.com/projects/p/openstack/language/ko_KR/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..22b4551c9b --- /dev/null +++ b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po @@ -0,0 +1,5604 @@ +# Brazilian Portuguese translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-02-06 21:07+0000\n" +"Last-Translator: Adriano Steffler \n" +"Language-Team: Brazilian Portuguese \n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Erro inesperado ao executar o comando." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de saída: %(exit_code)s\n" +"Saída padrão: %(stdout)r\n" +"Erro: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "sem método para mensagem: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." 
+msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "group %s já existe" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" +"Não é possível criar o VDI no SR %(sr_ref)s para a instância " +"%(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Backend inválido: %s" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Exceção interna: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "NotFound lançado: %s" + +#: cinder/service.py:310 
cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Encerrado serviço que não tem entrada na base de dados" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "O objeto da base de dados do serviço desapareceu, Recriando." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recuperada conexão servidor de modelo." + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "servidor de modelo perdido" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Buscando %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Executando comando (subprocesso): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Resultado foi %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Executando o comando (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "depuração em retorno de chamada: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Endereço para Link Local não encontrado: %s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Não foi possível atribuir um IP para o Link Local de %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend inválido: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' 
took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Impossível localizar uma porta aberta" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossível localizar uma porta aberta" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Não é possível destruir o VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 
+#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado o erro: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instância %s: fazendo um snapshot" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Criar volume de %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, 
python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Reiniciando a instância %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Não é possível desconectar o 
volume %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "recebido %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Conjunto completo de FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "conteúdo descompactado: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "recebido %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "sem método para mensagem: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Sem método para mensagem: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "conteúdo descompactado: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Removendo o registro da imagem %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "recebido %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake não tem uma implementação para %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Desanexar volume %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: 
cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for 
%(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exportando %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: ignorando export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: criando o export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: criando o export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: criando" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: criando" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: criando o export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: criando" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: criado com sucesso" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Capturado o erro: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + 
+#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: removendo" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "O volume não pertence à este node" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removendo export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instância %s: fazendo um snapshot" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: 
cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "resposta %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Começando a terminar instâncias" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "group %s já existe" + +#: cinder/volume/drivers/lvm.py:82 +#, 
python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Criar volume de %s GB" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "instância %s: fazendo um snapshot" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for 
SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "resposta %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Começando a terminar instâncias" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: ignorando export" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de saída: %(exit_code)s\n" +"Saída padrão: %(stdout)r\n" +"Erro: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Apagando usuário %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring 
export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Endereço para Link Local não encontrado: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Re-exportando %s volumes" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Desanexar volume %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconectado à fila" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "NotFound lançado: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. 
Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "NotFound lançado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconectado à fila" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound lançado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get 
LUN target details for the LUN %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "resposta %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, 
python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Erro inesperado ao executar o comando." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Erro inesperado ao executar o comando." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "group %s já existe" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Erro inesperado ao executar o comando." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ru/LC_MESSAGES/cinder.po b/cinder/locale/ru/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..bb1482b9fa --- /dev/null +++ b/cinder/locale/ru/LC_MESSAGES/cinder.po @@ -0,0 +1,5660 @@ +# Russian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-03-25 09:34+0000\n" +"Last-Translator: Eugene Marshal \n" +"Language-Team: Russian \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Неожиданная ошибка при выполнении команды." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Команда: %(cmd)s\n" +"Код выхода: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "Обнаружено неизвестное исключение." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Сбой соединения с glance" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Не авторизировано." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "Пользователь не имеет административных привилегий" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Политика не допускает выполнения %(action)s." + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Ядро не найдено для образа %(image_id)s." + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Недопустимые параметры." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Недопустимый снимок" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Том %(volume_id)s никуда не присоединён" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Ошибка загрузки данных в формат json" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "Недопустимый запрос." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "Недопустимый запрос." 
+ +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Недопустимый тип тома" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Недопустимый том" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Недопустимый тип содержимого %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "В данный момент служба недоступна." + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "Образ %(image_id)s недопустим: %(reason)s" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "Ресурс не может быть найден." + +#: cinder/exception.py:229 +#, fuzzy, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "Том %(volume_id)s не найден." + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "Том %(volume_id)s не найден." + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "Не найден том для копии %(instance_id)s." + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "Том %(volume_id)s не имеет метаданных с ключом %(metadata_key)s." 
+ +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "Недопустимые метаданные" + +#: cinder/exception.py:255 cinder/exception.py:268 +#, fuzzy +msgid "Invalid metadata size" +msgstr "Неправильный ключ метаданных" + +#: cinder/exception.py:259 +#, fuzzy, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "Копия %(instance_id)s не имеет метаданных с ключом %(metadata_key)s." + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "Тип тома %(volume_type_id)s не может быть найден." + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "Тип тома под названием %(volume_type_name)s не может быть найден." + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" +"Тип тома %(volume_type_id)s не имеет дополнительных особенностей с ключом" +" %(extra_specs_key)s." + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "Снимок %(snapshot_id)s не может быть найден." + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "удаление тома %(volume_name)s, который имеет снимок" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "Отсутствует диск в %(location)s" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "Недопустимый образ href %(image_href)s." + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "Образ %(image_id)s не найден." + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "Служба %(service_id)s не найдена." + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "Узел %(host)s не найден." + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Узел сompute %(host)s не найден." + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "Квота не найдена" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "Квота проекта %(project_id)s не найдена." 
+ +#: cinder/exception.py:368 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Класс %(class_name)s не найден: %(exception)s" + +#: cinder/exception.py:372 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Квота проекта %(project_id)s не найдена." + +#: cinder/exception.py:376 +#, fuzzy, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Пользователь %(user_id)s не найден." + +#: cinder/exception.py:380 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Превышена квота" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "Перемещение %(migration_id)s не найдено." + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "Перемещение не найдено для копии %(instance_id)s в состоянии %(status)s." + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "Файл %(file_path)s не может быть найден." + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "Класс %(class_name)s не найден: %(exception)s" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "Действие не разрешено." + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Тип тома %(name)s уже существует." 
+ +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "Ошибка перемещения" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "Неправильное тело сообщения: %(reason)s" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "Невозможно найти конфигурацию по адресу %(path)s" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "Допустимый узел не найден. %(reason)s" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Узел %(host)s не работает или не существует." + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "Превышена квота" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" +"Невозможно создать volume_type с именем %(name)s и спецификациями " +"%(extra_specs)s" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, fuzzy, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Неправильное тело сообщения: %(reason)s" + +#: cinder/exception.py:499 +#, fuzzy, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Недопустимое состояние сервера: %(status)s" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Невозможно создать VDI на SR %(sr_ref)s для копии %(instance_name)s" + +#: cinder/exception.py:511 +#, fuzzy, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "Невозможно найти узел для копии %s" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "Копия %(instance_id)s не найдена." 
+ +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Обнаружено неизвестное исключение." + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Обнаружено неизвестное исключение." + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, fuzzy, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "Группа LDAP %(group_id)s не найдена." + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Недопустимый внутренний интерфейс: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Сбой соединения с glance" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. 
" +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" +"Пропуск %(full_task_name)s, %(ticks_to_skip)s раз осталось, для " +"произведения следующего запуска" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Запуск повторяющегося задания %(full_task_name)s" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Ошибка во время %(full_task_name)s: %(e)s" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, fuzzy, python-format +msgid "Caught %s, exiting" +msgstr "снимок %s: удаление" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Вложенное исключение: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format 
+msgid "Starting %d workers" +msgstr "установка сетевого узла" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Запуск узла сети (версия %(vcs_string)s) %(topic)s" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Служба завершила работу из-за отсутствия записи базы данных" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" 
+msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Полный набор ФЛАГОВ:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Получение %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Приняты неизвестные аргументы ключевого слова для utils.execute: %r" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Выполнение команды (субпроцесс): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Результат %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r ошибка. Выполняется повтор." 
+ +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Выполнение команды (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "Среда не поддерживается с использованием SSH" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "process_input не поддерживается с использованием SSH" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +#, fuzzy +msgid "Specify a password or private_key" +msgstr "Задайте san_password или san_private_key" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Подключение к libvirt: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "отладка в обратном вызове: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Недопустимый внутренний интерфейс: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "внутренний интерфейс %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "Ожидался объект типа: %s" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc: '%(name)s' заняла %(total_time).2f с." 
+ +#: cinder/utils.py:1105 +#, fuzzy, python-format +msgid "Could not remove tmpdir: %s" +msgstr "Ошибка удаления контейнера: %s" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Невозможно найти адрес %r" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Невозможно найти адрес %r" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Невозможно найти адрес %r" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, fuzzy, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "Выполняется %(name)s на %(host)s:%(port)s" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Выполняется останов сервера WSGI." + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "Сервер WSGI был остановлен." + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "маркер [%s] не найден" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s не содержит версию" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "Инициализация диспетчера расширений." + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "Загруженное расширение: %s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "Загрузка расширения %s" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: 
cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Ошибка загрузки расширения %(ext_factory)s: %(exc)s" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Ошибка загрузки расширения %(ext_name)s: %(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "элемент не является потомком" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, fuzzy, python-format +msgid "Delete backup with id: %s" +msgstr "Удалить снимок с идентификатором: %s" + +#: cinder/api/contrib/backups.py:185 +#, fuzzy, python-format +msgid "Creating new backup %s" +msgstr "Создание SR %s" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "Неправильный формат тела запроса" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +#, fuzzy +msgid "Snapshot not found." +msgstr "Узел не найден" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, fuzzy, python-format +msgid "Host '%s' could not be found." +msgstr "Узел %(host)s не найден." + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "Недопустимое состояние: '%s'" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "Недопустимый параметр обновления: '%s'" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "Перевод узла %(host)s в %(state)s." 
+ +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "Узел не найден" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +#, fuzzy +msgid "Request body empty" +msgstr "Неправильный формат тела запроса" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "Тело запроса и URI не совпадают" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "Тело запроса содержит избыточное количество объектов" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "Недопустимое тело запроса" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Обнаружена ошибка: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s возвратил с HTTP %(status)d" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" +"Расширение %(ext_name)s: Невозможно расширить ресурс %(collection)s: Нет " +"такого ресурса" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Расширение %(ext_name)s расширение ресурса: %(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, fuzzy, python-format +msgid "Exception handling resource: %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "В запросе предоставлен не распознанный тип-содержимого" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "Тип содержимого не предоставлен в запросе" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "Пустое тело предоставлено в запросе" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "Неправильное тело запроса" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "Не поддерживаемый тип содержимого" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "Неправильный запрос url" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" 
+msgstr "%(url)s возвратил ошибку: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Только %(value)s %(verb)s запрос(ов) могут быть сделаны для %(uri)s, " +"каждые %(unit_string)s." + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "Копия не существует" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "Элемент метаданных не найден" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "Удалить снимок с идентификатором: %s" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "Создать снимок из тома %s" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. 
" +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "Сервер не существует" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "Удалить том с идентификатором: %s" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Создание раздела %s ГБ" + +#: cinder/api/v1/volumes.py:418 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" + +#: cinder/api/v2/volumes.py:359 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, fuzzy, python-format +msgid "unsupported compression algorithm: %s" +msgstr "неподдерживаемый раздел: %s" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "Копия не 
существует" + +#: cinder/backup/services/swift.py:127 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "Копия не существует" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "Ожидался объект типа: %s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "Недопустимый запрос." 
+ +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, fuzzy, python-format +msgid "delete %s finished" +msgstr "_удалить: %s" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Перезагрузка копии %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Невозможно отсоединить том %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Невозможно найти том %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Невозможно найти том %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr 
"Ошибка поиска vbd для vdi %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Ошибка поиска vbd для vdi %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "Нераспознанное значение read_deleted '%s'" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, fuzzy, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "Ошибка соединения с SQL (%(connstring)s). %(attempts)d попыток осталось." 
+ +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "Таблица |%s| не создана!" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "Ошибка анализа 'qemu-img info'." + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "Преобразование в необработанный, но текущий формат %s" + +#: cinder/image/image_utils.py:278 +#, fuzzy, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "Преобразование в необработанный, но текущий формат %s" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Исключение: %s" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "Исходное исключение было сброшено" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "_удалить: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Класс %(fullname)s устарел: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Полный набор ФЛАГОВ:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s не в допустимых приоритетах" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" +"Неполадка '%(e)s', попытка отправить в систему уведомлений. " +"Нагрузка=%(payload)s" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "неизвлечённый контекст: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "получено %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "не определен метод для сообщения: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Не определен метод для сообщения: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, fuzzy, python-format +msgid "Making synchronous call on %s ..." +msgstr "Выполнение асинхронного вызова %s ..." + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Обнаружено неизвестное исключение." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"Удалённая ошибка: %(exc_type)s %(value)s\n" +"%(traceback)s." + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Возврат исключения %s вызывающему" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Повторное подключение к серверу AMQP на %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Подключение к серверу AMQP на %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"Невозможно подключиться к серверу AMQP на %(hostname)s:%(port)d после " +"%(max_retries)d попыток: %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP сервер на %(hostname)s:%(port)d недоступен: %(err_str)s. Повторная " +"попытка через %(sleep_time)d секунд." 
+
+#: cinder/openstack/common/rpc/impl_kombu.py:609
+#: cinder/openstack/common/rpc/impl_qpid.py:403
+#, python-format
+msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s"
+msgstr "Ошибка объявления потребителя для темы '%(topic)s': %(err_str)s"
+
+#: cinder/openstack/common/rpc/impl_kombu.py:627
+#: cinder/openstack/common/rpc/impl_qpid.py:418
+#, python-format
+msgid "Timed out waiting for RPC response: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_kombu.py:631
+#: cinder/openstack/common/rpc/impl_qpid.py:422
+#, python-format
+msgid "Failed to consume message from queue: %s"
+msgstr "Ошибка принятия сообщения из очереди: %s"
+
+#: cinder/openstack/common/rpc/impl_kombu.py:670
+#: cinder/openstack/common/rpc/impl_qpid.py:457
+#, python-format
+msgid "Failed to publish message to topic '%(topic)s': %(err_str)s"
+msgstr "Ошибка публикации сообщения в тему '%(topic)s': %(err_str)s"
+
+#: cinder/openstack/common/rpc/impl_qpid.py:351
+#, fuzzy, python-format
+msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds"
+msgstr "Невозможно подключиться к серверу AMQP: %s "
+
+#: cinder/openstack/common/rpc/impl_qpid.py:357
+#, python-format
+msgid "Connected to AMQP server on %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_qpid.py:370
+msgid "Re-established AMQP queues"
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_qpid.py:430
+msgid "Error processing message. Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_zmq.py:97
+msgid "JSON serialization failed."
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "неизвлечённый контекст: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Исключение регистрации ВМ %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +#, fuzzy +msgid "In reactor registered" +msgstr "Отсутствуют зарегистрированные ВМ" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, fuzzy, python-format +msgid "Could not create IPC directory %s" +msgstr "Ошибка удаления контейнера: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, fuzzy, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "Заданные данные: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +#, fuzzy +msgid "Creating payload" +msgstr "Создание изображения" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "получено %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "Недопустимый запрос." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake не имеет реализации для %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "Выполняется ли соответствующая служба?" + +#: cinder/scheduler/chance.py:56 +#, fuzzy +msgid "Could not find another host" +msgstr "Невозможно найти другой compute" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "Отфильтрованы %(hosts)s" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, fuzzy, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "Принято служебное обновление для %(service_name)s от %(host)s." + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "Принято служебное обновление для %(service_name)s от %(host)s." + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "Ошибка schedule_%(method)s: %(ex)s" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "Невозможно декодировать параметры расписания: '%(e)s'" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "Имитация выполнения команды (субпроцесс): %s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "Имитация команды привела к исключению %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, fuzzy, python-format +msgid "unrecognized argument %s" +msgstr "Нераспознанное значение read_deleted '%s'" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, fuzzy, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "Заданные данные: %s" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "Итоговые данные: %s" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy 
+msgid "Invalid input" +msgstr "Недопустимый снимок" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Отсоединить том %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"Код состояния: %(_status)s\n" +"Тело: %(_body)s" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "Ошибка аутентификации" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "Ошибка авторизации" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "объект не найден" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "Выполнение %(method)s на %(relative_url)s" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "Тело: %s" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => код %(http_status)s" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => код %(http_status)s" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "Непредвиденный код состояния" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "Декодирование JSON: %s" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: 
cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" + +#: cinder/volume/api.py:177 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "Поиск по: %s" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "Превышена квота для %(pid)s, 
попытка создания тома %(size)sG" + +#: cinder/volume/api.py:546 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "Превышена квота для %(pid)s, попытка выполнить %(min_count)s копий" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:719 +#, fuzzy +msgid "Volume status is in-use." +msgstr "том %s: том занят" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Восстановление после недопустимого выполнения. Попытка номер %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "iSCSI-устройство не найдено в %s" + +#: cinder/volume/driver.py:439 +#, fuzzy, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" +"Том ISCSI не найден в: %(mount_device)s. Будет выполнена повторная " +"проверка и попытка. Повторение: %(tries)s" + +#: cinder/volume/driver.py:451 +#, fuzzy, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "Найден узел iSCSI %(mount_device)s (после %(tries)s повторных проверок)" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +#, fuzzy +msgid "Updating volume status" +msgstr "Обновление состояния узла" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Повторное экспортирование %s томов" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "том %s: пропуск экспортирования" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "том %s: создание экспортирования" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "том %s: создание экспортирования" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "том %s: создание" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "том %s: создание" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "том %s: создание экспортирования" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "том %s: создание" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "том %s: создание завершено" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Ошибка БД: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" 
+msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "том %s: удаление" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Том до сих пор присоединён" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "том %s: удаление экспортирования" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "том %s: том занят" + +#: cinder/volume/manager.py:441 +#, fuzzy +msgid "Failed to update usages deleting volume" +msgstr "Ошибка обновления агента: %(resp)r" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "том %s: удаление завершено" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "снимок %s: создание" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "снимок %(snap_name)s: создание" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "снимок %s: создание завершено" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "снимок %s: удаление" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "снимок %s: создание завершено" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "снимок %s: удаление выполнено" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" 
+msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Подключить том %(volume_id)s для копии %(instance_id)s на %(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "Очистить возможности" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "Принято уведомление {%s}" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "Ошибка БД: %s" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Невозможно найти адрес %r" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +#, fuzzy +msgid "JSON Error" +msgstr "Ошибка перемещения" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, fuzzy, python-format +msgid 
"Configure data : %s" +msgstr "Заданные данные: %s" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "ответ %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Невозможно найти том %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Невозможно найти том %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Ошибка перезагрузки копии" + +#: cinder/volume/drivers/coraid.py:347 +#, fuzzy, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "снимок %(snap_name)s: создание" + +#: cinder/volume/drivers/coraid.py:362 +#, fuzzy, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "Создать том из снимка %s" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. 
Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "_создать: %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "образ уже присоединён" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "том группы %s не существует" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, fuzzy, python-format +msgid "Symbolic link %s not found" +msgstr "маркер [%s] не найден" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd не имеет пула %s" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Недопустимый снимок" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Невозможно найти том %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, 
python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog не выполняется: %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog не выполняется" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "ответ %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +#, fuzzy +msgid "Updating cluster status info" +msgstr "Обновление состояния узла" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Невозможно получить обновлённое состояние: %s" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "том группы %s не существует" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid 
"ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, fuzzy, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "Перевод узла %(host)s в %(state)s." 
+ +#: cinder/volume/drivers/storwize_svc.py:574 +#, fuzzy, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "удаление тома %(volume_name)s, который имеет снимок" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "том %s: пропуск экспортирования" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, fuzzy, python-format +msgid "_prepare_fc_map: %s" +msgstr "_создать: %s" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, 
python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Команда: %(cmd)s\n" +"Код выхода: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Создание SR %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target 
creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Узел не найден" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, fuzzy, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Том %(volume_id)s не найден." + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +#, fuzzy +msgid "Entering create_volume_from_snapshot." +msgstr "Создать том из снимка %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, fuzzy, python-format +msgid "Delete Volume: %(volume)s" +msgstr "Удалить том с идентификатором: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Повторное экспортирование %s томов" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "Присоединить том %(volume_id)s к копии %(server_id)s на %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Ошибка запуска xvp: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +#, fuzzy +msgid "Storage type not found." +msgstr "образ не найден." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +#, fuzzy +msgid "Masking View not found." +msgstr "образ не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +#, fuzzy +msgid "Ecom user not found." +msgstr "Сервер не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +#, fuzzy +msgid "Ecom server not found." +msgstr "Сервер не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Переподлючено к очереди" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Полномочия %(role_id)s не могут быть найдены." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, fuzzy, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "Том %(volume_id)s не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Не найден том для копии %(instance_id)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, fuzzy, python-format +msgid "Error finding %s." +msgstr "Ошибка поиска vdis в SR %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, fuzzy, python-format +msgid "delete_volume: volume name: %s." +msgstr "Удалить том с идентификатором: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, fuzzy, python-format +msgid "create_export: volume name:%s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. 
volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, fuzzy, python-format +msgid "_read_xml:%s" +msgstr "_создать: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, fuzzy, python-format +msgid "_execute_cli:%s" +msgstr "_удалить: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, fuzzy, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "Образ %(image_id)s недопустим: %(reason)s" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, fuzzy, python-format +msgid "Using DFM server: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Выполняемые копии: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "Добавление правила поставщика: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Переподлючено к очереди" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +#, fuzzy +msgid "Failed to provision dataset member" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +#, fuzzy +msgid "Failed to remove and delete dataset LUN member" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +#, fuzzy +msgid "Failed to remove and delete dataset Qtree member" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, fuzzy, python-format +msgid "Created LUN with name %s" +msgstr "Создана папка с адресом %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, 
python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Выполняемые копии: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "снимок %s: удаление выполнено" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +#, fuzzy +msgid "Nexenta SA returned the error" +msgstr "Сервер возвратил ошибку: %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "Заданные данные: %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "ответ %s" + 
+#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "том группы %s не существует" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "Возврат команды CLIQ %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" +"Неправильный ответ на команду CLIQ %(verb)s %(cliq_args)s. " +"Результат=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Ошибка выполнения команды CLIQ %(verb)s %(cliq_args)s. Результат=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" +"Непредвиденное количество виртуальных ip для кластера %(cluster_name)s. " +"Результат=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "Сведения о томе: %(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "local_path не поддерживается" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Ошибка в соглашении: %s" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "Задайте san_password или san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip должен быть назначен" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "LUID не найден для %(zfs_poolname)s. Вывод=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Ошибка в соглашении: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "группа %s уже существует" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Неожиданная ошибка при выполнении команды." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/tl/LC_MESSAGES/cinder.po b/cinder/locale/tl/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..7e7da70c70 --- /dev/null +++ b/cinder/locale/tl/LC_MESSAGES/cinder.po @@ -0,0 +1,5575 @@ +# Tagalog translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-08-23 11:21+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: Tagalog \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "walang paraan para sa mensahe: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." 
+msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." 
+msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "natanggap %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "natanggap %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "walang paraan para sa mensahe: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Walang paraan para sa mensahe: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "natanggap %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Muling kumonekta sa queue" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Muling kumonekta sa queue" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/tr/LC_MESSAGES/cinder.po b/cinder/locale/tr/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..89a33aa59d --- /dev/null +++ b/cinder/locale/tr/LC_MESSAGES/cinder.po @@ -0,0 +1,5573 @@ +# Turkish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-12-14 18:10+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Turkish \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/uk/LC_MESSAGES/cinder.po b/cinder/locale/uk/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..751ff76fce --- /dev/null +++ b/cinder/locale/uk/LC_MESSAGES/cinder.po @@ -0,0 +1,5576 @@ +# Ukrainian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-08-23 11:21+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: Ukrainian \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Неочікувана помилка при виконанні команди." 
+ +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "без порядку для повідомлень: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Створити розділ на %s ГБ" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, 
python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "отримано %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "отримано %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "немає методу для повідомлення: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Немає методу для повідомлення: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "отримано %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Від'єднати том %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: 
cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" 
+msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "відповідь %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "відповідь %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Від'єднати том %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Оновлено з'єднання до черги" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Оновлено з'єднання до черги" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "відповідь %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, 
python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/vi_VN/LC_MESSAGES/cinder.po b/cinder/locale/vi_VN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..32cab9dc7c --- /dev/null +++ b/cinder/locale/vi_VN/LC_MESSAGES/cinder.po @@ -0,0 +1,5574 @@ +# Vietnamese (Vietnam) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Vietnamese (Viet Nam) " +"(http://www.transifex.com/projects/p/openstack/language/vi_VN/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could 
not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format 
+msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " 
+"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid 
"status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: 
cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: 
cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, 
python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, 
python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. 
Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..5af90c372b --- /dev/null +++ b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po @@ -0,0 +1,5657 @@ +# Chinese (Simplified) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-04-03 23:36+0000\n" +"Last-Translator: cheesecake \n" +"Language-Team: Chinese (Simplified) \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "运行命令时出现意外错误" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"命令:%(cmd)s\n" +"退出代码:%(exit_code)s\n" +"标准输出:%(stdout)r\n" +"标准错误输出:%(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "数据库异常被包裹。" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "发生未知异常。" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "连接到glance失败" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "未授权。" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "用户没有管理员权限" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "政策不允许 %(action)s 被执行。" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "没有为镜像 %(image_id)s 找到内核。" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "无法接受的参数。" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "无效的快照" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "卷 %(volume_id)s 没有附加任何东西" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "把数据加载为json格式失败" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "请求无效。" + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." 
+msgstr "请求无效。" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "收到无效的输入" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "无效的卷类型" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "无效的卷" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "无效的内容类型 %(content_type)s。" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "该时刻服务无法使用。" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "资源没有找到。" + +#: cinder/exception.py:229 +#, fuzzy, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/exception.py:237 +#, fuzzy, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "无法找到帐户 %(account_name) on Solidfire 设备" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "没有为实例 %(instance_id)s 找到卷。" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "无效的元数据" + +#: cinder/exception.py:255 cinder/exception.py:268 +#, fuzzy +msgid "Invalid metadata size" +msgstr "无效的元数据键" + +#: cinder/exception.py:259 +#, fuzzy, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "实例 %(instance_id)s 没有键为 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "卷类型 %(volume_type_id)s 没有找到。" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "卷类型 %(volume_type_id)s 没有额外说明键 %(extra_specs_key)s 。" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "快照 %(snapshot_id)s 没有找到。" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "正在删除有快照的卷 %(volume_name)s" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:307 +#, fuzzy, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." 
+msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "在 %(location)s 没有磁盘" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "无效的镜像href %(image_href)s。" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "镜像 %(image_id)s 没有找到。" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "服务 %(service_id)s 没有找到。" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "主机 %(host)s 没有找到。" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" + +#: cinder/exception.py:339 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "没有找到二进制 %(binary)s 在主机 %(host)s 上。" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "配额没有找到。" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "没有为项目 %(project_id)s 找到配额。" + +#: cinder/exception.py:368 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "找不到类 %(class_name)s :异常 %(exception)s" + +#: cinder/exception.py:372 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "没有为项目 %(project_id)s 找到配额。" + +#: cinder/exception.py:376 +#, fuzzy, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "用户 %(user_id)s 没有找到。" + +#: cinder/exception.py:380 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "超出配额" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "迁移 %(migration_id)s 没有找到。" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "没有为实例 %(instance_id)s 找到迁移其状态为 %(status)s 。" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "找不到文件 %(file_path)s。" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "找不到类 %(class_name)s :异常 %(exception)s" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "操作不允许。" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "密钥对 %(key_name)s 已经存在。" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "卷类型 %(name)s 已经存在。" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "迁移错误" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "错误格式的消息体: %(reason)s" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "在 %(path)s 找不到配置文件。" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. 
%(reason)s"
+msgstr "找不到有效主机,原因是 %(reason)s。"
+
+#: cinder/exception.py:438
+#, python-format
+msgid "Host %(host)s is not up or doesn't exist."
+msgstr "主机 %(host)s 没有启动或者不存在。"
+
+#: cinder/exception.py:442
+msgid "Quota exceeded"
+msgstr "超出配额"
+
+#: cinder/exception.py:449
+msgid "Requested volume or snapshot exceeds allowed Gigabytes quota"
+msgstr ""
+
+#: cinder/exception.py:454
+msgid "Maximum volume/snapshot size exceeded"
+msgstr ""
+
+#: cinder/exception.py:458
+#, python-format
+msgid "Maximum number of volumes allowed (%(allowed)d) exceeded"
+msgstr ""
+
+#: cinder/exception.py:462
+#, python-format
+msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded"
+msgstr ""
+
+#: cinder/exception.py:466
+#, fuzzy, python-format
+msgid "Detected more than one volume with name %(vol_name)s"
+msgstr "检测到不止一个名称为 %(vol_name)s 的卷。"
+
+#: cinder/exception.py:470
+#, python-format
+msgid "3PAR Host already exists: %(err)s. %(info)s"
+msgstr ""
+
+#: cinder/exception.py:474
+#, python-format
+msgid "Invalid 3PAR Domain: %(err)s"
+msgstr ""
+
+#: cinder/exception.py:478
+#, python-format
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
+msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。"
+
+#: cinder/exception.py:483
+msgid "Bad response from SolidFire API"
+msgstr "来自SolidFire API的错误响应"
+
+#: cinder/exception.py:487
+#, python-format
+msgid "Error in SolidFire API response: data=%(data)s"
+msgstr "SolidFire API响应里发生错误:data=%(data)s"
+
+#: cinder/exception.py:491
+#, python-format
+msgid "Unknown or unsupported command %(cmd)s"
+msgstr ""
+
+#: cinder/exception.py:495
+#, fuzzy, python-format
+msgid "Malformed response to command %(cmd)s: %(reason)s"
+msgstr "错误格式的消息体: %(reason)s"
+
+#: cinder/exception.py:499
+#, fuzzy, python-format
+msgid "Bad HTTP response status %(status)s"
+msgstr "无效的服务器状态:%(status)s"
+
+#: cinder/exception.py:503
+#, python-format
+msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "无法在存储库 %(sr_ref)s 上为实例 %(instance_name)s 创建 VDI" + +#: cinder/exception.py:511 +#, fuzzy, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "无法找到实例 %s 的宿主机" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "实例 %(instance_id)s 没有找到。" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "发生未知异常。" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "发生未知异常。" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, fuzzy, python-format +msgid "Backup %(backup_id)s could not be found." 
+msgstr "没有找到LDAP用户组 %(group_id)s。" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "无效的后台:%s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "连接到glance失败" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "跳过 %(full_task_name)s,到下次运行还剩下%(ticks_to_skip)s 跳。" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "正在运行周期性任务 %(full_task_name)s" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "在 %(full_task_name)s 期间发生的错误:%(e)s" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." +msgstr "向调度器通报能力。" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "JSON文件表示策略。" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "请求的规则找不到时的检查缺省规则。" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, fuzzy, python-format +msgid "Caught %s, exiting" +msgstr "快照 %s:正在删除" + +#: 
cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "内层异常:%s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "起始地址" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "未知的基文件:%s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "启动 %(topic)s 节点 (版本 %(vcs_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "为服务 %s 创建消费者" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "因无数据库记录,服务已被中止" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "服务数据库对象消失,正在重新创建。" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "与模型服务器(model server)的连接已恢复!" 
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "失去与模型服务器的连接" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "标记全集:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "%(flag)s:标记集合 " + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "正在抓取 %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "发现未知的 utils.execute 关键字参数:%r" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "正在运行cmd (subprocess):%s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "运行结果为 %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "%r 失败,重试。" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "运行cmd (SSH):%s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "SSH上不支持环境变量" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "SSH上不支持的进程输入参数。" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +#, fuzzy +msgid "Specify a password or private_key" +msgstr "指定san_password或者san_private_key" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "正在连接 libvirt:%s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "回调中debug:%s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "本地IP地址没有找到:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "无法连接到 %(interface)s 的本地IP:%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "无效的后台:%s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "后台 %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "循环调用中。" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "期望的对象类型:%s" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc:'%(name)s' 用了%(total_time).2f 秒" + +#: cinder/utils.py:1105 +#, fuzzy, python-format +msgid "Could not remove tmpdir: %s" +msgstr "移除容器失败:%s" + +#: cinder/utils.py:1213 +#, fuzzy, python-format +msgid "Unknown byte multiplier: %s" +msgstr "未知的基文件:%s" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "无法找到地址 %r" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "无法找到地址 %r" + +#: 
cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "无法找到地址 %r" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, fuzzy, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "启动%(name)s 位置在 %(host)s:%(port)s" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "关闭WSGI服务器" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "WSGI服务器已经停止。" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "你必须执行 __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "limit 参数必须是整数" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "limit参数必须是正数" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "offset 参数必须是整数" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "offset 参数必须是正数" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "没有找到标记 [%s]" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s 不包含版本" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "正在初始化扩展管理员。" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "加载的扩展:%s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "Ext name: %s" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "Ext alias: %s" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "Ext 描述: %s" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "Ext 命名空间: %s" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "Ext updated: %s" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "加载扩展发生异常:%s" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "正在加载扩展 %s" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "调用扩展工厂 %s" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "加载扩展 %(ext_factory)s 失败:%(exc)s" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "加载扩展 %(classpath)s 失败:%(exc)s" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "加载扩展 %(ext_name)s 失败:%(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use "
+"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead"
+msgstr ""
+
+#: cinder/api/xmlutil.py:268
+msgid "element is not a child"
+msgstr "元素不是子节点"
+
+#: cinder/api/xmlutil.py:417
+msgid "root element selecting a list"
+msgstr "根元素选择列表"
+
+#: cinder/api/xmlutil.py:742
+#, python-format
+msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
+msgstr "模板树不匹配;把slave %(slavetag)s 添加到master %(mastertag)s"
+
+#: cinder/api/xmlutil.py:861
+msgid "subclasses must implement construct()!"
+msgstr "subclasses必须实现construct()!"
+
+#: cinder/api/contrib/admin_actions.py:80
+#, python-format
+msgid "Updating %(resource)s '%(id)s' with '%(update)r'"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:128
+#, python-format
+msgid "show called for member %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:140
+#, fuzzy, python-format
+msgid "delete called for member %s"
+msgstr "修改用户 %s 的私钥"
+
+#: cinder/api/contrib/backups.py:143
+#, fuzzy, python-format
+msgid "Delete backup with id: %s"
+msgstr "删除id为 %s 的快照"
+
+#: cinder/api/contrib/backups.py:185
+#, fuzzy, python-format
+msgid "Creating new backup %s"
+msgstr "轮换出%d个备份"
+
+#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230
+msgid "Incorrect request body format"
+msgstr "不正确的请求主体格式"
+
+#: cinder/api/contrib/backups.py:201
+#, fuzzy, python-format
+msgid "Creating backup of volume %(volume_id)s in container %(container)s"
+msgstr "卷 %(volume_id)s 正在 %(mountpoint)s 上启动"
+
+#: cinder/api/contrib/backups.py:221
+#, python-format
+msgid "Restoring backup %(backup_id)s (%(body)s)"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:234
+#, python-format
+msgid "Restoring backup %(backup_id)s to volume %(volume_id)s"
+msgstr ""
+
+#: cinder/api/contrib/extended_snapshot_attributes.py:61
+#, fuzzy
+msgid "Snapshot not found." 
+msgstr "没有找到主机" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "无法理解XML" + +#: cinder/api/contrib/hosts.py:133 +#, fuzzy, python-format +msgid "Host '%s' could not be found." +msgstr "主机 %(host)s 没有找到。" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "无效的状态:'%s'" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "无效的更新设置:'%s'" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "把主机 %(host)s 设置为 %(state)s。" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource是只有管理员才能执行的功能。" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "没有找到主机" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +#, fuzzy +msgid "Request body empty" +msgstr "不正确的请求主体格式" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "请求主体和URI不匹配" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "请求主体包含太多items" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "无效的请求主体" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." 
+msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "抓到错误:%s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s 随HTTP %(status)d返回" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "必须明确一个ExtensionManager类" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "扩展资源:%s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "扩展%(ext_name)s:无法扩展资源 %(collection)s:没有那种资源" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "扩展资源的扩展 %(ext_name)s:%(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "无法理解JSON" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "过多主体密钥" + +#: cinder/api/openstack/wsgi.py:581 +#, fuzzy, python-format +msgid "Exception handling resource: %s" +msgstr "扩展资源:%s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "错误抛出: %s" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP 异常抛出:%s" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "请求中提供了无法识别的 Content-Type" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "请求中没有提供 Content-Type" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "请求中没有提供主体" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "没有该动作:%s" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "错误格式的请求主体" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "不支持的Content-Type" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "错误格式的请求url" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s返回错误:%(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" 
+"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "只能有 %(value)s 个 %(verb)s 请求发送给 %(uri)s 限定是每一个 %(unit_string)s。" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "这个请求受到频率限制。" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "实例不存在" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "元数据项目未找到" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "删除id为 %s 的快照" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "为卷 %s 创建快照" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. 
" +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "域不存在" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "删除id为 %s 的卷" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "提供了无效的imageRef。" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "创建 %s GB的卷" + +#: cinder/api/v1/volumes.py:418 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" + +#: cinder/api/v2/volumes.py:359 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "卷组状态必须可获取" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "状态必须可用" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "卷组状态必须可获取" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, fuzzy, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "正在把卷 %(volume_id)s 附加到 %(mountpoint)s" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, fuzzy, python-format +msgid "unsupported compression algorithm: %s" +msgstr "不支持的分区:%s" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "实例不存在" + +#: 
cinder/backup/services/swift.py:127 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "实例不存在" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "期望的对象类型:%s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." 
+msgstr "请求无效。" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, fuzzy, python-format +msgid "delete %s finished" +msgstr "_delete: %s" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, fuzzy, python-format +msgid "Removing iscsi_target for: %s" +msgstr "正在删除基文件:%s" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "尝试删除不存在的控制台%(console_id)s。" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, fuzzy, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "正在重启虚拟机 %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "无法分离 %s 卷" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "无法找到 %s 卷" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "无法找到 %s 卷" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "无法为VDI %s 找到VBD" + +#: 
cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "无法为VDI %s 找到VBD" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "使用空的请求上下文是不推荐的" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "无法识别的 read_deleted 取值“%s”" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "没有id为%(sm_backend_id)s的后台配置" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "没有名为 %(sm_flavor)s 的 sm_flavor" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" + +#: cinder/db/sqlalchemy/api.py:1987 +#, fuzzy, python-format +msgid "No backup with id %(backup_id)s" +msgstr "没有id为%(sm_backend_id)s的后台配置" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "version应该是整数" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, fuzzy, python-format +msgid "SQL connection failed. %s attempts left." 
+msgstr "SQL连接失败 (%(connstring)s)。还剩 %(attempts)d 次。" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "表 |%s| 没有创建" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +#, fuzzy +msgid "quota_classes table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +#, fuzzy +msgid "quota_usages table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +#, fuzzy +msgid "reservations table not dropped" +msgstr "dns_domains 表没有删除" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +#, fuzzy +msgid "volume_glance_metadata table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +#, fuzzy +msgid "backups table not dropped" +msgstr "dns_domains 表没有删除" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +#, fuzzy +msgid "snapshot_metadata table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for 
'%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "'qemu-img info'解析失败" + +#: cinder/image/image_utils.py:231 +#, fuzzy, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "转化为裸格式,但目前格式是 %s" + +#: cinder/image/image_utils.py:278 +#, fuzzy, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "转化为裸格式,但目前格式是 %s" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "得到异常:%s" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "正在丢弃原来的异常。" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, fuzzy, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "获得信号量 \"%(lock)s\" 为方法 \"%(method)s\" ...锁" + +#: cinder/openstack/common/lockutils.py:199 +#, fuzzy, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "正在 试图获取锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" + +#: cinder/openstack/common/lockutils.py:226 +#, fuzzy, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" + +#: cinder/openstack/common/lockutils.py:234 +#, fuzzy, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "_delete: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "syslog facility 必须是以下之一:%s" + +#: cinder/openstack/common/log.py:537 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "类 %(fullname)s 是不推荐的:%(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +#, fuzzy +msgid "in fixed duration looping call" +msgstr "循环调用中。" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +#, fuzzy +msgid "in dynamic looping call" +msgstr "循环调用中。" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, fuzzy, python-format +msgid "Failed to understand rule %(match)r" +msgstr "注入文件失败:%(resp)r" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "标记全集:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s 不在有效的优先级" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "Problem '%(e)s' 试图发送到通知系统。Payload=%(payload)s" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "未打包的上下文:%s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "已接收 %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "没有适用于消息的方法:%s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "没有适用于消息的方法:%s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, fuzzy, python-format +msgid "Making synchronous call on %s ..." +msgstr "在 %s 做同步call" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "消息ID(MSG_ID)是 %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "在 %s 做异步cast" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." 
+msgstr "做异步fanout cast" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "发生未知异常。" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"远程错误:%(exc_type)s %(value)s\n" +"%(traceback)s。" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "无效的RPC连接重用。" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, fuzzy, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "删除 %(base_file)s 失败,错误是 %(error)s" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "返回 %s 异常给调用者" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "正在重新连接位于 %(hostname)s:%(port)d 的AMQP服务器" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "连接到位于 %(hostname)s:%(port)d 的AMQP服务器" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"无法连接到位于%(hostname)s:%(port)d的AMQP server,尝试已经 %(max_retries)d " +"次:%(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "位于%(hostname)s:%(port)d的AMQP服务器不可达:%(err_str)s。%(sleep_time)d 秒钟后请再尝试。" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "为topic '%(topic)s'声明消费者失败:%(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "等待RPC响应超时:%s" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "从队列中消费消息失败:%s" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "给topic '%(topic)s'发布消息失败:%(err_str)s" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, fuzzy, python-format +msgid "Unable to connect to AMQP server: %(e)s. 
Sleeping %(delay)s seconds" +msgstr "无法连接到AMQP服务器:%s " + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "重建AMQP队列" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, fuzzy, python-format +msgid "Deserializing: %s" +msgstr "Ext 描述: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "未打包的上下文:%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "正在注销虚拟机 %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +#, fuzzy +msgid "In reactor registered" +msgstr "没有虚拟机注册" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, fuzzy, python-format +msgid "Could not create IPC directory %s" +msgstr "移除容器失败:%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, fuzzy, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "给定数据:%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +#, fuzzy +msgid "Creating payload" +msgstr "正在创建镜像" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "已接收 %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "请求无效。" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake 没有 %s 的实现" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "正确的服务在运行吗?" + +#: cinder/scheduler/chance.py:56 +#, fuzzy +msgid "Could not find another host" +msgstr "无法找到另一个计算节点" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "必须实现一个回滚 schedule" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "必须实现一个回滚 schedule" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "过滤掉的主机 %(hosts)s" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, fuzzy, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "schedule_%(method)s 失败:%(ex)s" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "无法统计调度器的选项文件 %(filename)s:“%(e)s”" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "无法解码调度器的选项:“%(e)s”" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "卷没有足够可分配的空间" + +#: cinder/scheduler/filters/capacity_filter.py:37 +#, fuzzy +msgid "Free capacity not set: volume node info collection broken." +msgstr "未设置 VCPUs;假设 CPU 集合损坏了" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "伪执行命令(子进程):%s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "伪命令匹配 %s" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "伪命令引起异常 %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" +"下面的迁移缺少了降级:\n" +"\t%s" + +#: cinder/tests/test_storwize_svc.py:243 +#, fuzzy, python-format +msgid "unrecognized argument %s" +msgstr "无法识别的 read_deleted 取值”%s“" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, fuzzy, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "给定数据:%s" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "结果数据:%s" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "无效的快照" + +#: 
cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "分离卷 %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"状态码: %(_status)s\n" +"主体: %(_body)s" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "认证错误" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "授权错误" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "条目没有找到" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "正在 %(relative_url)s 执行 %(method)s" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "主体:%s" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "意外的状态码" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "解码JSON:%s" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "状态必须可用" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/volume/api.py:177 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "搜索条件: %s" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "已经附加" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "已经分离" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "必须可用" + +#: cinder/volume/api.py:537 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/volume/api.py:546 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr 
"%(pid)s 已经超过配额,试图运行 %(min_count)s 个实例" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +#, fuzzy +msgid "Metadata property key greater than 255 characters" +msgstr "安全组 %s 不能比255个字符更长。" + +#: cinder/volume/api.py:631 +#, fuzzy +msgid "Metadata property value greater than 255 characters" +msgstr "安全组 %s 不能比255个字符更长。" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:719 +#, fuzzy +msgid "Volume status is in-use." +msgstr "卷 %s:卷繁忙" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "从失败的执行中恢复。尝试编号 %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "采用discovery,ISCSI provider_location 没有存储" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "ISCSI Discovery:找到 %s" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "在 %s 未找到iSCSI设备" + +#: cinder/volume/driver.py:439 +#, fuzzy, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "在 %(mount_device)s 上还没有找到iSCSI卷。将再次扫描并重试。尝试次数:%(tries)s" + +#: cinder/volume/driver.py:451 +#, fuzzy, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "找到iSCSI节点 %(mount_device)s (经过%(tries)s 次再扫描)" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +#, fuzzy +msgid "Updating volume status" +msgstr "更新主机状态" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "重新导出卷%s" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "卷 %s:跳过导出" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, fuzzy, python-format +msgid "Resuming delete on volume: %s" +msgstr "正在删除volumeID:%s " + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "卷%(vol_name)s:创建大小为 %(vol_size)sG 的逻辑卷" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "卷%s:正在创建导出" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "卷%s:正在创建导出" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "卷 %s: 创建中" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "卷 %s: 创建中" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "卷%s:正在创建导出" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "卷 %s: 创建中" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "卷%s:创建成功" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "数据库错误:%s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format
+msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "卷%s:删除中" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "卷不属于这个节点" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "卷%s:正在移除导出" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "卷 %s:卷繁忙" + +#: cinder/volume/manager.py:441 +#, fuzzy +msgid "Failed to update usages deleting volume" +msgstr "更新代理失败:%(resp)r" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "卷%s:删除成功" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "快照 %s:正在创建" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "快照 %(snap_name)s:正在创建" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "快照 %s:创建成功" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "快照 %s:正在删除" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "快照 %s:创建成功" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "快照 %s:删除成功" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "把卷 %(volume_id)s 附加到实例 %(instance_id)s 上位置在 
%(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "清理能力" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "收到通知 {%s}" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "数据库错误:%s" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "id不能是None" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "name不能是None" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, fuzzy, python-format +msgid "Message : %(message)s" +msgstr "%(code)s: %(message)s" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "无法找到地址 %r" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +#, fuzzy +msgid "JSON Error" +msgstr "迁移错误" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, fuzzy, python-format +msgid "Configure data : %s" +msgstr "给定数据:%s" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "响应 %s" + +#: cinder/volume/drivers/coraid.py:199 +#, fuzzy, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/volume/drivers/coraid.py:289 
+msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "无法找到 %s 卷" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "无法找到 %s 卷" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "重新启动实例失败" + +#: cinder/volume/drivers/coraid.py:347 +#, fuzzy, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "快照 %(snap_name)s:正在创建" + +#: cinder/volume/drivers/coraid.py:362 +#, fuzzy, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "从快照 %s 创建卷" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "嵌套(调用)返回 %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "加载扩展发生异常:%s" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "镜像已经挂载" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't 
exist" +msgstr "卷组 %s 不存在" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/lvm.py:345 +#, fuzzy, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:364 +#, fuzzy, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, fuzzy, python-format +msgid "Symbolic link %s not found" +msgstr "没有找到标记 [%s]" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, fuzzy, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:542 +#, fuzzy, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "跳过remove_export。没有为卷导出iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "RBD没有池 %s" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "无效的快照" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "无法找到 %s 卷" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog 没有工作:%s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog 没有工作" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid 
"Payload for SolidFire API call: %s" +msgstr "SolidFire API 调用的参数:%s" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "调用 json.loads() 引起异常:%s" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "SolidFire API调用结果:%s" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "响应 %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "找到solidfire帐户:%s" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "solidfire帐户:%s 不存在,正在创建..." + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "进入SolidFire delete_volume..." + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "离开SolidFire delete_volume" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "正在执行SolidFire ensure_export..." + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "正在执行SolidFire create_export..." + +#: cinder/volume/drivers/solidfire.py:558 +#, fuzzy +msgid "Updating cluster status info" +msgstr "更新主机状态" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "无法得到最新的状态:%s" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "卷组 %s 不存在" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, fuzzy, python-format +msgid "%s is not set" +msgstr "租户ID没有设" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, fuzzy, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "把主机 %(host)s 设置为 %(state)s。" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, fuzzy, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "正在删除有快照的卷 %(volume_name)s" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "卷 %s:跳过导出" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, fuzzy, python-format +msgid "_prepare_fc_map: %s" +msgstr "_create: %s" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, 
python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"命令:%(cmd)s\n" +"退出代码:%(exit_code)s\n" +"标准输出:%(stdout)r\n" +"标准错误输出:%(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "正在创建存储库 %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while 
ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "没有找到磁盘:%s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "正在向 %(url)s 发送 %(method)s。消息体为 \"%(body)s\"" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "操作完成。%(data)s" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "卷 %(name)s 没有找到。它可能已经被删除" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +#, fuzzy +msgid "Entering create_volume." +msgstr "进入SolidFire create_volume..." + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "创建卷:%(volume)s 大小:%(size)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +#, fuzzy +msgid "Entering create_volume_from_snapshot." +msgstr "从快照 %s 创建卷" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +#, fuzzy +msgid "Entering create_cloned_volume." +msgstr "进入SolidFire create_volume..." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +#, fuzzy +msgid "Entering delete_volume." +msgstr "进入SolidFire delete_volume..." + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, fuzzy, python-format +msgid "Delete Volume: %(volume)s" +msgstr "删除id为 %s 的卷" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "重新导出卷%s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "把卷 %(volume_id)s 附加到实例 %(server_id)s 的 %(device)s 设备上" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "启动xvp发生错误:%s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." 
+msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, fuzzy, python-format +msgid "Map volume: %(volume)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, fuzzy, python-format +msgid "Unmap volume: %(volume)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, fuzzy, python-format +msgid "Volume %s is already mapped." +msgstr "rootfs 已经被移除了" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, fuzzy, python-format +msgid "Found Storage Type: %s" +msgstr "找到solidfire帐户:%s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +#, fuzzy +msgid "Storage type not found." 
+msgstr "镜像没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +#, fuzzy +msgid "Masking View not found." +msgstr "镜像没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +#, fuzzy +msgid "Ecom user not found." +msgstr "没有找到服务器。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +#, fuzzy +msgid "Ecom server not found." +msgstr "没有找到服务器。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "角色 %(role_id)s 没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, fuzzy, python-format +msgid "Volume %(volumename)s not found on the array." 
+msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "没有为实例 %(instance_id)s 找到卷。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, fuzzy, python-format +msgid "Error finding %s." +msgstr "在存储库 %s 寻找VDIs出错" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "引起异常 NotFound: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, fuzzy, python-format +msgid "delete_volume: volume name: %s." +msgstr "删除id为 %s 的卷" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. 
Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, fuzzy, python-format +msgid "create_export: volume name:%s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, fuzzy, python-format +msgid "_read_xml:%s" +msgstr "_create: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, fuzzy, python-format +msgid "_execute_cli:%s" +msgstr "_delete: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, fuzzy, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, fuzzy, python-format +msgid "Using DFM server: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "正在运行的实例:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "正在删除基文件:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "未知的基文件:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +#, fuzzy +msgid "Failed to provision dataset member" +msgstr "更新数据库失败" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +#, fuzzy +msgid "Failed to remove and delete dataset LUN member" +msgstr "更新数据库失败" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +#, fuzzy +msgid "Failed to remove and delete dataset Qtree member" +msgstr "更新数据库失败" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, fuzzy, python-format +msgid "Created LUN with name %s" +msgstr "已经创建路径为 %s 的目录" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "嵌套(调用)返回 %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, 
python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "正在删除基文件:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "快照 %s:删除成功" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +#, fuzzy +msgid "Nexenta SA returned the error" +msgstr "服务器返回错误:%s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "给定数据:%s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +#, fuzzy +msgid "Bad response from server" +msgstr "来自SolidFire API的错误响应" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "响应 %s" + +#: 
cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "卷组 %s 不存在" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "返回CLIQ命令 %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "CLIQ命令 %(verb)s %(cliq_args)s 错误格式的响应。Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "在运行CLIQ命令 %(verb)s %(cliq_args)s 时发生错误。输出结果 Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "集群 %(cluster_name)s 有意外数量的虚拟 ip 地址。输出结果 Result=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "卷信息:%(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "不支持local_path" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "握手出错:%s" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "指定san_password或者san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip必须设置" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "没有为 %(zfs_poolname)s 找到LUID。Output=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "握手出错:%s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "安全组 %s 已经存在" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "运行命令时出现意外错误" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..7bbe63bbbf --- /dev/null +++ b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po @@ -0,0 +1,5598 @@ +# Chinese (Traditional) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-03-07 02:00+0000\n" +"Last-Translator: Charles Hsu \n" +"Language-Team: Chinese (Traditional) \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." 
+msgstr "非預期的執行錯誤" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"命令: %(cmd)s\n" +"退出代碼: %(exit_code)s\n" +"標準輸出: %(stdout)r\n" +"標準錯誤輸出: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "發生一個未知例外" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "連接到glance失敗" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "未被授權" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "使用者並沒有管理者權力" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "不可接受的參數值" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "無效的快照(snapshot)" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." 
+msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." 
+msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "無法替 instance實例 %(instance_name)s , 建立 VDI 在SR %(sr_ref)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "發生一個未知例外" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "發生一個未知例外" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "連接到glance失敗" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, 
stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "找不到Volume %s" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, 
python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 
cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. 
backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, 
python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." 
+msgstr "無效的Keypair" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "無法卸載 Volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "找不到Volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "找不到Volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "無法卸載 Volume %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version 
does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "無法卸載 Volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." 
+msgstr "發生一個未知例外" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." 
+msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format 
+msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "無效的快照(snapshot)" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: 
cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " 
+"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: 
cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "無法卸載 Volume %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "無效的快照(snapshot)" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: 
cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "內文解碼失敗" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk 
%s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid 
"leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, 
python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"命令: %(cmd)s\n" +"退出代碼: %(exit_code)s\n" +"標準輸出: %(stdout)r\n" +"標準錯誤輸出: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: 
cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. 
Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. 
LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN 
target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume 
%s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/manager.py b/cinder/manager.py new file mode 100644 index 0000000000..5f52e56865 --- /dev/null +++ b/cinder/manager.py @@ -0,0 +1,221 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base Manager class. + +Managers are responsible for a certain aspect of the system. It is a logical +grouping of code relating to a portion of the system. In general other +components should be using the manager to make changes to the components that +it is responsible for. 
+ +For example, other components that need to deal with volumes in some way, +should do so by calling methods on the VolumeManager instead of directly +changing fields in the database. This allows us to keep all of the code +relating to volumes in the same place. + +We have adopted a basic strategy of Smart managers and dumb data, which means +rather than attaching methods to data objects, components should call manager +methods that act on the data. + +Methods on managers that can be executed locally should be called directly. If +a particular method must execute on a remote host, this should be done via rpc +to the service that wraps the manager + +Managers should be responsible for most of the db access, and +non-implementation specific data. Anything implementation specific that can't +be generalized should be done by the Driver. + +In general, we prefer to have one manager with multiple drivers for different +implementations, but sometimes it makes sense to have multiple managers. You +can think of it this way: Abstract different overall strategies at the manager +level(FlatNetwork vs VlanNetwork), and different implementations at the driver +level(LinuxNetDriver vs CiscoNetDriver). + +Managers will often provide methods for initial setup of a host or periodic +tasks to a wrapping service. + +This module provides Manager, a base class for managers. + +""" + +from cinder.db import base +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher +from cinder.scheduler import rpcapi as scheduler_rpcapi +from cinder import version + + +FLAGS = flags.FLAGS + + +LOG = logging.getLogger(__name__) + + +def periodic_task(*args, **kwargs): + """Decorator to indicate that a method is a periodic task. + + This decorator can be used in two ways: + + 1. Without arguments '@periodic_task', this will be run on every tick + of the periodic scheduler. + + 2. 
With arguments, @periodic_task(ticks_between_runs=N), this will be + run on every N ticks of the periodic scheduler. + """ + def decorator(f): + f._periodic_task = True + f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0) + return f + + # NOTE(sirp): The `if` is necessary to allow the decorator to be used with + # and without parens. + # + # In the 'with-parens' case (with kwargs present), this function needs to + # return a decorator function since the interpreter will invoke it like: + # + # periodic_task(*args, **kwargs)(f) + # + # In the 'without-parens' case, the original function will be passed + # in as the first argument, like: + # + # periodic_task(f) + if kwargs: + return decorator + else: + return decorator(args[0]) + + +class ManagerMeta(type): + def __init__(cls, names, bases, dict_): + """Metaclass that allows us to collect decorated periodic tasks.""" + super(ManagerMeta, cls).__init__(names, bases, dict_) + + # NOTE(sirp): if the attribute is not present then we must be the base + # class, so, go ahead an initialize it. If the attribute is present, + # then we're a subclass so make a copy of it so we don't step on our + # parent's toes. + try: + cls._periodic_tasks = cls._periodic_tasks[:] + except AttributeError: + cls._periodic_tasks = [] + + try: + cls._ticks_to_skip = cls._ticks_to_skip.copy() + except AttributeError: + cls._ticks_to_skip = {} + + for value in cls.__dict__.values(): + if getattr(value, '_periodic_task', False): + task = value + name = task.__name__ + cls._periodic_tasks.append((name, task)) + cls._ticks_to_skip[name] = task._ticks_between_runs + + +class Manager(base.Base): + __metaclass__ = ManagerMeta + + # Set RPC API version to 1.0 by default. + RPC_API_VERSION = '1.0' + + def __init__(self, host=None, db_driver=None): + if not host: + host = FLAGS.host + self.host = host + super(Manager, self).__init__(db_driver) + + def create_rpc_dispatcher(self): + '''Get the rpc dispatcher for this manager. 
+ + If a manager would like to set an rpc API version, or support more than + one class as the target of rpc messages, override this method. + ''' + return rpc_dispatcher.RpcDispatcher([self]) + + def periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + for task_name, task in self._periodic_tasks: + full_task_name = '.'.join([self.__class__.__name__, task_name]) + + ticks_to_skip = self._ticks_to_skip[task_name] + if ticks_to_skip > 0: + LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s" + " ticks left until next run"), locals()) + self._ticks_to_skip[task_name] -= 1 + continue + + self._ticks_to_skip[task_name] = task._ticks_between_runs + LOG.debug(_("Running periodic task %(full_task_name)s"), locals()) + + try: + task(self, context) + except Exception as e: + if raise_on_error: + raise + LOG.exception(_("Error during %(full_task_name)s: %(e)s"), + locals()) + + def init_host(self): + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ + pass + + def service_version(self, context): + return version.version_string() + + def service_config(self, context): + config = {} + for key in FLAGS: + config[key] = FLAGS.get(key, None) + return config + + +class SchedulerDependentManager(Manager): + """Periodically send capability updates to the Scheduler services. + + Services that need to update the Scheduler of their capabilities + should derive from this class. Otherwise they can derive from + manager.Manager directly. Updates are only sent after + update_service_capabilities is called with non-None values. 
+ + """ + + def __init__(self, host=None, db_driver=None, service_name='undefined'): + self.last_capabilities = None + self.service_name = service_name + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + super(SchedulerDependentManager, self).__init__(host, db_driver) + + def update_service_capabilities(self, capabilities): + """Remember these capabilities to send on next periodic update.""" + self.last_capabilities = capabilities + + @periodic_task + def _publish_service_capabilities(self, context): + """Pass data back to the scheduler at a periodic interval.""" + if self.last_capabilities: + LOG.debug(_('Notifying Schedulers of capabilities ...')) + self.scheduler_rpcapi.update_service_capabilities( + context, + self.service_name, + self.host, + self.last_capabilities) diff --git a/cinder/openstack/__init__.py b/cinder/openstack/__init__.py new file mode 100644 index 0000000000..0a3b98867a --- /dev/null +++ b/cinder/openstack/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/openstack/common/README b/cinder/openstack/common/README new file mode 100644 index 0000000000..def4a172aa --- /dev/null +++ b/cinder/openstack/common/README @@ -0,0 +1,13 @@ +openstack-common +---------------- + +A number of modules from openstack-common are imported into this project. 
"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools
import uuid


def generate_request_id():
    """Return a new, unique request id of the form 'req-<uuid4>'."""
    return 'req-%s' % uuid.uuid4()


class RequestContext(object):

    """Security context carried through the request pipeline.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        # Generate an id on the caller's behalf when none was supplied.
        self.request_id = request_id or generate_request_id()

    def to_dict(self):
        """Return the context as a plain dict (e.g. for serialization)."""
        return dict(user=self.user,
                    tenant=self.tenant,
                    is_admin=self.is_admin,
                    read_only=self.read_only,
                    show_deleted=self.show_deleted,
                    auth_token=self.auth_token,
                    request_id=self.request_id)


def get_admin_context(show_deleted="no"):
    """Return a context with admin rights and no associated user/tenant."""
    return RequestContext(None,
                          tenant=None,
                          is_admin=True,
                          show_deleted=show_deleted)


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping; `function`
    itself is not inspected.
    """
    for candidate in itertools.chain(kwargs.values(), args):
        if isinstance(candidate, RequestContext):
            return candidate

    return None
+ +import gc +import pprint +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet +from oslo.config import cfg + +eventlet_backdoor_opts = [ + cfg.IntOpt('backdoor_port', + default=None, + help='port for eventlet backdoor to listen') +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) + + +def _dont_use_this(): + print "Don't use this, just disconnect instead" + + +def _find_objects(t): + return filter(lambda o: isinstance(o, t), gc.get_objects()) + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print i, gt + traceback.print_stack(gt.gr_frame) + print + + +def _print_nativethreads(): + for threadId, stack in sys._current_frames().items(): + print threadId + traceback.print_stack(stack) + print + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + 'pnt': _print_nativethreads, + } + + if CONF.backdoor_port is None: + return None + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. 
+ def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = eventlet.listen(('localhost', CONF.backdoor_port)) + port = sock.getsockname()[1] + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/cinder/openstack/common/exception.py b/cinder/openstack/common/exception.py new file mode 100644 index 0000000000..c8690157f5 --- /dev/null +++ b/cinder/openstack/common/exception.py @@ -0,0 +1,142 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exceptions common to OpenStack projects +""" + +import logging + +from cinder.openstack.common.gettextutils import _ + +_FATAL_EXCEPTION_FORMAT_ERRORS = False + + +class Error(Exception): + def __init__(self, message=None): + super(Error, self).__init__(message) + + +class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): + self.message = message + self.code = code + super(ApiError, self).__init__('%s: %s' % (code, message)) + + +class NotFound(Error): + pass + + +class UnknownScheme(Error): + + msg = "Unknown scheme '%s' found in URI" + + def __init__(self, scheme): + msg = self.__class__.msg % scheme + super(UnknownScheme, self).__init__(msg) + + +class BadStoreUri(Error): + + msg = "The Store URI %s was malformed. 
Reason: %s" + + def __init__(self, uri, reason): + msg = self.__class__.msg % (uri, reason) + super(BadStoreUri, self).__init__(msg) + + +class Duplicate(Error): + pass + + +class NotAuthorized(Error): + pass + + +class NotEmpty(Error): + pass + + +class Invalid(Error): + pass + + +class BadInputError(Exception): + """Error resulting from a client sending bad input to a server""" + pass + + +class MissingArgumentError(Error): + pass + + +class DatabaseMigrationError(Error): + pass + + +class ClientConnectionError(Exception): + """Error resulting from a client connecting to a server""" + pass + + +def wrap_exception(f): + def _wrap(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + if not isinstance(e, Error): + #exc_type, exc_value, exc_traceback = sys.exc_info() + logging.exception(_('Uncaught exception')) + #logging.error(traceback.extract_stack(exc_traceback)) + raise Error(str(e)) + raise + _wrap.func_name = f.func_name + return _wrap + + +class OpenstackException(Exception): + """ + Base Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
+ """ + message = "An unknown exception occurred" + + def __init__(self, **kwargs): + try: + self._error_string = self.message % kwargs + + except Exception as e: + if _FATAL_EXCEPTION_FORMAT_ERRORS: + raise e + else: + # at least get the core message out if something happened + self._error_string = self.message + + def __str__(self): + return self._error_string + + +class MalformedRequestBody(OpenstackException): + message = "Malformed message body: %(reason)s" + + +class InvalidContentType(OpenstackException): + message = "Invalid content type %(content_type)s" diff --git a/cinder/openstack/common/excutils.py b/cinder/openstack/common/excutils.py new file mode 100644 index 0000000000..ccb2d072e9 --- /dev/null +++ b/cinder/openstack/common/excutils.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. +""" + +import contextlib +import logging +import sys +import traceback + +from cinder.openstack.common.gettextutils import _ + + +@contextlib.contextmanager +def save_and_reraise_exception(): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. 
This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. + """ + type_, value, tb = sys.exc_info() + try: + yield + except Exception: + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(type_, value, tb)) + raise + raise type_, value, tb diff --git a/cinder/openstack/common/fileutils.py b/cinder/openstack/common/fileutils.py new file mode 100644 index 0000000000..b988ad03d5 --- /dev/null +++ b/cinder/openstack/common/fileutils.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import errno
import os


def ensure_tree(path):
    """Create a directory (and any ancestor directories required).

    Existing directories are tolerated silently; any other OS failure
    (including *path* existing as a non-directory) propagates.

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists" when it really is a directory.
        if exc.errno != errno.EEXIST:
            raise
        if not os.path.isdir(path):
            raise
+ """ + gettext.install(domain, + localedir=os.environ.get(domain.upper() + '_LOCALEDIR'), + unicode=True) diff --git a/cinder/openstack/common/importutils.py b/cinder/openstack/common/importutils.py new file mode 100644 index 0000000000..3bd277f47e --- /dev/null +++ b/cinder/openstack/common/importutils.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. +""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ValueError, AttributeError): + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """ + Import a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. 
+ """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/cinder/openstack/common/jsonutils.py b/cinder/openstack/common/jsonutils.py new file mode 100644 index 0000000000..70134d4192 --- /dev/null +++ b/cinder/openstack/common/jsonutils.py @@ -0,0 +1,167 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. 
+''' + + +import datetime +import functools +import inspect +import itertools +import json +import types +import xmlrpclib + +from cinder.openstack.common import timeutils + + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (types.NoneType, int, basestring, bool, float, long) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + # handle obvious types first - order of basic types determined by running + # full tests on nova project, resulting in the following counts: + # 572754 + # 460353 + # 379632 + # 274610 + # 199918 + # 114200 + # 51817 + # 26164 + # 6491 + # 283 + # 19 + if isinstance(value, _simple_types): + return value + + if isinstance(value, datetime.datetime): + if convert_datetime: + return timeutils.strtime(value) + else: + return value + + # value of itertools.count doesn't get caught by nasty_type_tests + # and results in infinite loop when list(value) is called. + if type(value) == itertools.count: + return unicode(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. 
+ if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > max_depth: + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... + try: + recursive = functools.partial(to_primitive, + convert_instances=convert_instances, + convert_datetime=convert_datetime, + level=level, + max_depth=max_depth) + if isinstance(value, dict): + return dict((k, recursive(v)) for k, v in value.iteritems()) + elif isinstance(value, (list, tuple)): + return [recursive(lv) for lv in value] + + # It's not clear why xmlrpclib created their own DateTime type, but + # for our purposes, make it a datetime type which is explicitly + # handled + if isinstance(value, xmlrpclib.DateTime): + value = datetime.datetime(*tuple(value.timetuple())[:6]) + + if convert_datetime and isinstance(value, datetime.datetime): + return timeutils.strtime(value) + elif hasattr(value, 'iteritems'): + return recursive(dict(value.iteritems()), level=level + 1) + elif hasattr(value, '__iter__'): + return recursive(list(value)) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. + return recursive(value.__dict__, level=level + 1) + else: + if any(test(value) for test in _nasty_type_tests): + return unicode(value) + return value + except TypeError: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). 
+ return unicode(value) + + +def dumps(value, default=to_primitive, **kwargs): + return json.dumps(value, default=default, **kwargs) + + +def loads(s): + return json.loads(s) + + +def load(s): + return json.load(s) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append((__name__, 'dumps', TypeError, + 'loads', ValueError, 'load')) + anyjson.force_implementation(__name__) diff --git a/cinder/openstack/common/local.py b/cinder/openstack/common/local.py new file mode 100644 index 0000000000..f1bfc824bf --- /dev/null +++ b/cinder/openstack/common/local.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Greenthread local storage of variables using weak references""" + +import weakref + +from eventlet import corolocal + + +class WeakLocal(corolocal.local): + def __getattribute__(self, attr): + rval = corolocal.local.__getattribute__(self, attr) + if rval: + # NOTE(mikal): this bit is confusing. What is stored is a weak + # reference, not the value itself. We therefore need to lookup + # the weak reference and return the inner value here. 
+ rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return corolocal.local.__setattr__(self, attr, value) + + +# NOTE(mikal): the name "store" should be deprecated in the future +store = WeakLocal() + +# A "weak" store uses weak references and allows an object to fall out of scope +# when it falls out of scope in the code that uses the thread local storage. A +# "strong" store will hold a reference to the object so that it never falls out +# of scope. +weak_store = WeakLocal() +strong_store = corolocal.local diff --git a/cinder/openstack/common/lockutils.py b/cinder/openstack/common/lockutils.py new file mode 100644 index 0000000000..f21f0d9186 --- /dev/null +++ b/cinder/openstack/common/lockutils.py @@ -0,0 +1,278 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import errno +import functools +import os +import shutil +import tempfile +import time +import weakref + +from eventlet import semaphore +from oslo.config import cfg + +from cinder.openstack.common import fileutils +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import local +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +util_opts = [ + cfg.BoolOpt('disable_process_locking', default=False, + help='Whether to disable inter-process locks'), + cfg.StrOpt('lock_path', + help=('Directory to use for lock files. Default to a ' + 'temp directory')) +] + + +CONF = cfg.CONF +CONF.register_opts(util_opts) + + +def set_defaults(lock_path): + cfg.set_defaults(util_opts, lock_path=lock_path) + + +class _InterProcessLock(object): + """Lock implementation which allows multiple locks, working around + issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does + not require any cleanup. Since the lock is always held on a file + descriptor rather than outside of the process, the lock gets dropped + automatically if the process crashes, even if __exit__ is not executed. + + There are no guarantees regarding usage by multiple green threads in a + single process here. This lock works only between processes. Exclusive + access between local threads should be achieved using the semaphores + in the @synchronized decorator. + + Note these locks are released when the descriptor is closed, so it's not + safe to close the file descriptor while another green thread holds the + lock. Just opening and closing the lock file can break synchronisation, + so lock files must be accessed only using this abstraction. + """ + + def __init__(self, name): + self.lockfile = None + self.fname = name + + def __enter__(self): + self.lockfile = open(self.fname, 'w') + + while True: + try: + # Using non-blocking locks since green threads are not + # patched to deal with blocking locking calls. 
class _PosixLock(_InterProcessLock):
    """Inter-process lock backed by POSIX ``fcntl.lockf`` on the lock file."""

    def trylock(self):
        # LOCK_NB makes the call raise IOError (EACCES/EAGAIN) instead of
        # blocking when another process already holds the lock; the caller
        # (__enter__) retries with a short sleep.
        fcntl.lockf(self.lockfile, fcntl.LOCK_NB | fcntl.LOCK_EX)

    def unlock(self):
        # Release the advisory lock held on the open file object.
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+ + The external keyword argument denotes whether this lock should work across + multiple processes. This means that if two different workers both run a + a method decorated with @synchronized('mylock', external=True), only one + of them will execute at a time. + + The lock_path keyword argument is used to specify a special location for + external lock files to live. If nothing is set, then CONF.lock_path is + used as a default. + """ + + def wrap(f): + @functools.wraps(f) + def inner(*args, **kwargs): + # NOTE(soren): If we ever go natively threaded, this will be racy. + # See http://stackoverflow.com/questions/5390569/dyn + # amically-allocating-and-destroying-mutexes + sem = _semaphores.get(name, semaphore.Semaphore()) + if name not in _semaphores: + # this check is not racy - we're already holding ref locally + # so GC won't remove the item and there was no IO switch + # (only valid in greenthreads) + _semaphores[name] = sem + + with sem: + LOG.debug(_('Got semaphore "%(lock)s" for method ' + '"%(method)s"...'), {'lock': name, + 'method': f.__name__}) + + # NOTE(mikal): I know this looks odd + if not hasattr(local.strong_store, 'locks_held'): + local.strong_store.locks_held = [] + local.strong_store.locks_held.append(name) + + try: + if external and not CONF.disable_process_locking: + LOG.debug(_('Attempting to grab file lock "%(lock)s" ' + 'for method "%(method)s"...'), + {'lock': name, 'method': f.__name__}) + cleanup_dir = False + + # We need a copy of lock_path because it is non-local + local_lock_path = lock_path + if not local_lock_path: + local_lock_path = CONF.lock_path + + if not local_lock_path: + cleanup_dir = True + local_lock_path = tempfile.mkdtemp() + + if not os.path.exists(local_lock_path): + fileutils.ensure_tree(local_lock_path) + + # NOTE(mikal): the lock name cannot contain directory + # separators + safe_name = name.replace(os.sep, '_') + lock_file_name = '%s%s' % (lock_file_prefix, safe_name) + lock_file_path = 
def synchronized_with_prefix(lock_file_prefix):
    """Return a ``synchronized`` decorator pre-bound to *lock_file_prefix*.

    Projects typically wrap the shared decorator once::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
            ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.
    """
    # Bind only the prefix; name/external/lock_path stay caller-chosen.
    decorator = functools.partial(synchronized,
                                  lock_file_prefix=lock_file_prefix)
    return decorator
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Openstack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. 
+ +""" + +import ConfigParser +import cStringIO +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import stat +import sys +import traceback + +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import jsonutils +from cinder.openstack.common import local +from cinder.openstack.common import notifier + + +_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any other logging ' + 'options specified. Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + cfg.StrOpt('log-format', + default=_DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %(default)s'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. 
' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), +] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user)s %(tenant)s] ' + '%(instance)s%(message)s', + help='format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqplib=WARN', + 'sqlalchemy=WARN', + 'boto=WARN', + 'suds=INFO', + 'keystone=INFO', + 'eventlet.wsgi.server=WARN' + ], + help='list of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='make deprecations fatal'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. 
+ cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + +class ContextAdapter(logging.LoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if 
CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + else: + instance_uuid = kwargs.pop('instance_uuid', None) + if instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra.update({'instance': instance_extra}) + + extra.update({"project": self.project}) + extra.update({"version": self.version}) + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
def _create_logging_excepthook(product_name):
    """Build a ``sys.excepthook`` that logs uncaught exceptions.

    The returned hook logs the exception at CRITICAL under *product_name*;
    the full traceback is attached only when CONF.verbose is set.
    """
    def logging_excepthook(exc_type, exc_value, exc_tb):
        extra = {}
        if CONF.verbose:
            extra['exc_info'] = (exc_type, exc_value, exc_tb)
        getLogger(product_name).critical(str(exc_value), **extra)
    return logging_excepthook
+def _load_log_config(log_config): + try: + logging.config.fileConfig(log_config) + except ConfigParser.Error, exc: + raise LogConfigError(log_config, str(exc)) + + +def setup(product_name): + """Setup logging.""" + if CONF.log_config: + _load_log_config(CONF.log_config) + else: + _setup_logging_from_conf() + sys.excepthook = _create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string): + cfg.set_defaults(log_opts, + logging_context_format_string= + logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +def _setup_logging_from_conf(): + log_root = getLogger(None).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = _find_facility_from_conf() + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + mode = int(CONF.logfile_mode, 8) + st = os.stat(logpath) + if st.st_mode != (stat.S_IFREG | mode): + os.chmod(logpath, mode) + + if CONF.use_stderr: + streamlog = 
class WritableLogger(object):
    """Adapt a logger to a minimal file-like object exposing ``write``.

    Useful for handing a logger to APIs (e.g. eventlet.wsgi) that expect a
    writable stream; each ``write`` call becomes one log record at ``level``.
    """

    def __init__(self, logger, level=logging.INFO):
        # Target logger and the fixed level every written message gets.
        self.logger = logger
        self.level = level

    def write(self, msg):
        """Emit *msg* as a log record at the configured level."""
        self.logger.log(self.level, msg)
+ + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(sdague): default the fancier formating params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id', None): + self._fmt = CONF.logging_context_format_string + else: + self._fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + self._fmt += " " + CONF.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formated copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = cStringIO.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + 
+ def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/cinder/openstack/common/loopingcall.py b/cinder/openstack/common/loopingcall.py new file mode 100644 index 0000000000..8be3a00eb4 --- /dev/null +++ b/cinder/openstack/common/loopingcall.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from eventlet import event +from eventlet import greenthread + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. 
class LoopingCallBase(object):
    """Shared state and control surface for the looping-call variants.

    Holds the callable plus its arguments, a running flag flipped by
    ``stop()``, and the event (``done``) that ``wait()`` blocks on.
    Subclasses implement ``start()`` and set ``self.done``.
    """

    def __init__(self, f=None, *args, **kw):
        # The function to invoke each iteration and its call arguments.
        self.f = f
        self.args = args
        self.kw = kw
        # _running gates the subclass loop; done is created by start().
        self._running = False
        self.done = None

    def stop(self):
        """Ask the loop to exit after the current iteration."""
        self._running = False

    def wait(self):
        """Block until the loop finishes; returns its final value."""
        return self.done.wait()
+ """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug(_('Dynamic looping call sleeping for %.02f ' + 'seconds'), idle) + greenthread.sleep(idle) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/cinder/openstack/common/network_utils.py b/cinder/openstack/common/network_utils.py new file mode 100644 index 0000000000..5224e01aa9 --- /dev/null +++ b/cinder/openstack/common/network_utils.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network-related utilities and helper functions. +""" + +import logging + +LOG = logging.getLogger(__name__) + + +def parse_host_port(address, default_port=None): + """ + Interpret a string as a host:port pair. 
+ An IPv6 address MUST be escaped if accompanied by a port, + because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 + means both [2001:db8:85a3::8a2e:370:7334] and + [2001:db8:85a3::8a2e:370]:7334. + + >>> parse_host_port('server01:80') + ('server01', 80) + >>> parse_host_port('server01') + ('server01', None) + >>> parse_host_port('server01', default_port=1234) + ('server01', 1234) + >>> parse_host_port('[::1]:80') + ('::1', 80) + >>> parse_host_port('[::1]') + ('::1', None) + >>> parse_host_port('[::1]', default_port=1234) + ('::1', 1234) + >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) + ('2001:db8:85a3::8a2e:370:7334', 1234) + + """ + if address[0] == '[': + # Escaped ipv6 + _host, _port = address[1:].split(']') + host = _host + if ':' in _port: + port = _port.split(':')[1] + else: + port = default_port + else: + if address.count(':') == 1: + host, port = address.split(':') + else: + # 0 means ipv4, >1 means ipv6. + # We prohibit unescaped ipv6 addresses with port. + host = address + port = default_port + + return (host, None if port is None else int(port)) diff --git a/cinder/openstack/common/notifier/__init__.py b/cinder/openstack/common/notifier/__init__.py new file mode 100644 index 0000000000..45c3b46ae9 --- /dev/null +++ b/cinder/openstack/common/notifier/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import uuid

from oslo.config import cfg

from cinder.openstack.common import context
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils


LOG = logging.getLogger(__name__)

notifier_opts = [
    cfg.MultiStrOpt('notification_driver',
                    default=[],
                    help='Driver or drivers to handle sending notifications'),
    cfg.StrOpt('default_notification_level',
               default='INFO',
               help='Default notification level for outgoing notifications'),
    cfg.StrOpt('default_publisher_id',
               default='$host',
               help='Default publisher_id for outgoing notifications'),
]

CONF = cfg.CONF
CONF.register_opts(notifier_opts)

# Valid notification priorities, patterned after the Python logging levels.
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'

log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)


class BadPriorityException(Exception):
    """Raised when notify() is given a priority not in log_levels."""
    pass


def notify_decorator(name, fn):
    """Decorator for notify which is used from utils.monkey_patch().

    :param name: name of the function
    :param fn: object of the function
    :returns: function -- decorated function
    """
    def wrapped_func(*args, **kwarg):
        # Capture the call's positional and keyword arguments as the
        # notification payload.
        body = {'args': list(args), 'kwarg': dict(kwarg)}

        ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
        notify(ctxt,
               CONF.default_publisher_id,
               name,
               CONF.default_notification_level,
               body)
        return fn(*args, **kwarg)
    return wrapped_func


def publisher_id(service, host=None):
    """Return the conventional publisher id, '<service>.<host>'."""
    if not host:
        host = CONF.host
    return "%s.%s" % (service, host)


def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    # NOTE(review): the publisher_id parameter shadows the module-level
    # publisher_id() helper inside this function; kept for API
    # compatibility with existing callers.
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    # A failing driver must not prevent the remaining drivers from
    # receiving the notification: log the failure and keep going.
    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            # Pass the substitution mapping as lazy logging arguments
            # instead of %-formatting eagerly.
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s"),
                          {'e': e, 'payload': payload})


# Cache of loaded driver modules/objects, keyed by driver name (or the
# driver object itself when added directly).  None until first use.
_drivers = None


def _get_drivers():
    """Instantiate, cache, and return drivers based on the CONF."""
    global _drivers
    if _drivers is None:
        _drivers = {}
        for notification_driver in CONF.notification_driver:
            add_driver(notification_driver)

    return _drivers.values()


def add_driver(notification_driver):
    """Add a notification driver at runtime.

    :param notification_driver: either a dotted module path to import,
        or an already-loaded driver object exposing notify().
    """
    # Make sure the driver list is initialized.
    _get_drivers()
    if isinstance(notification_driver, basestring):
        # Load and add
        try:
            driver = importutils.import_module(notification_driver)
            _drivers[notification_driver] = driver
        except ImportError:
            LOG.exception(_("Failed to load notifier %s. "
                            "These notifications will not be sent."),
                          notification_driver)
    else:
        # Driver is already loaded; just add the object.
        _drivers[notification_driver] = notification_driver


def _reset_drivers():
    """Used by unit tests to reset the drivers."""
    global _drivers
    _drivers = None
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging + + +CONF = cfg.CONF + + +def notify(_context, message): + """Notifies the recipient of the desired event given the model. + Log notifications using openstack's default logging system""" + + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'cinder.openstack.common.notification.%s' % + message['event_type']) + getattr(logger, priority)(jsonutils.dumps(message)) diff --git a/cinder/openstack/common/notifier/no_op_notifier.py b/cinder/openstack/common/notifier/no_op_notifier.py new file mode 100644 index 0000000000..bc7a56ca7a --- /dev/null +++ b/cinder/openstack/common/notifier/no_op_notifier.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +def notify(_context, message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/cinder/openstack/common/notifier/rabbit_notifier.py b/cinder/openstack/common/notifier/rabbit_notifier.py new file mode 100644 index 0000000000..2ffe9524e9 --- /dev/null +++ b/cinder/openstack/common/notifier/rabbit_notifier.py @@ -0,0 +1,29 @@ +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import rpc_notifier + +LOG = logging.getLogger(__name__) + + +def notify(context, message): + """Deprecated in Grizzly. Please use rpc_notifier instead.""" + + LOG.deprecated(_("The rabbit_notifier is now deprecated." + " Please use rpc_notifier instead.")) + rpc_notifier.notify(context, message) diff --git a/cinder/openstack/common/notifier/rpc_notifier.py b/cinder/openstack/common/notifier/rpc_notifier.py new file mode 100644 index 0000000000..46a95a17c9 --- /dev/null +++ b/cinder/openstack/common/notifier/rpc_notifier.py @@ -0,0 +1,46 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder.openstack.common import context as req_context +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'notification_topics', default=['notifications', ], + help='AMQP topic used for openstack notifications') + +CONF = cfg.CONF +CONF.register_opt(notification_topic_opt) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.notification_topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/cinder/openstack/common/notifier/rpc_notifier2.py b/cinder/openstack/common/notifier/rpc_notifier2.py new file mode 100644 index 0000000000..62a8eda53d --- /dev/null +++ b/cinder/openstack/common/notifier/rpc_notifier2.py @@ -0,0 +1,52 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +'''messaging based notification driver, with message envelopes''' + +from oslo.config import cfg + +from cinder.openstack.common import context as req_context +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'topics', default=['notifications', ], + help='AMQP topic(s) used for openstack notifications') + +opt_group = cfg.OptGroup(name='rpc_notifier2', + title='Options for rpc_notifier2') + +CONF = cfg.CONF +CONF.register_group(opt_group) +CONF.register_opt(notification_topic_opt, opt_group) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.rpc_notifier2.topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message, envelope=True) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/cinder/openstack/common/notifier/test_notifier.py b/cinder/openstack/common/notifier/test_notifier.py new file mode 100644 index 0000000000..96c1746bf4 --- /dev/null +++ b/cinder/openstack/common/notifier/test_notifier.py @@ -0,0 +1,22 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +NOTIFICATIONS = [] + + +def notify(_context, message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/cinder/openstack/common/policy.py b/cinder/openstack/common/policy.py new file mode 100644 index 0000000000..0ca48ce9e8 --- /dev/null +++ b/cinder/openstack/common/policy.py @@ -0,0 +1,301 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Common Policy Engine Implementation""" + +import logging +import urllib +import urllib2 + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import jsonutils + + +LOG = logging.getLogger(__name__) + + +_BRAIN = None + + +def set_brain(brain): + """Set the brain used by enforce(). + + Defaults use Brain() if not set. 
+ + """ + global _BRAIN + _BRAIN = brain + + +def reset(): + """Clear the brain used by enforce().""" + global _BRAIN + _BRAIN = None + + +def enforce(match_list, target_dict, credentials_dict, exc=None, + *args, **kwargs): + """Enforces authorization of some rules against credentials. + + :param match_list: nested tuples of data to match against + + The basic brain supports three types of match lists: + + 1) rules + + looks like: ``('rule:compute:get_instance',)`` + + Retrieves the named rule from the rules dict and recursively + checks against the contents of the rule. + + 2) roles + + looks like: ``('role:compute:admin',)`` + + Matches if the specified role is in credentials_dict['roles']. + + 3) generic + + looks like: ``('tenant_id:%(tenant_id)s',)`` + + Substitutes values from the target dict into the match using + the % operator and matches them against the creds dict. + + Combining rules: + + The brain returns True if any of the outer tuple of rules + match and also True if all of the inner tuples match. You + can use this to perform simple boolean logic. For + example, the following rule would return True if the creds + contain the role 'admin' OR the if the tenant_id matches + the target dict AND the the creds contains the role + 'compute_sysadmin': + + :: + + { + "rule:combined": ( + 'role:admin', + ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin') + ) + } + + Note that rule and role are reserved words in the credentials match, so + you can't match against properties with those names. Custom brains may + also add new reserved words. For example, the HttpBrain adds http as a + reserved word. + + :param target_dict: dict of object properties + + Target dicts contain as much information as we can about the object being + operated on. + + :param credentials_dict: dict of actor properties + + Credentials dicts contain as much information as we can about the user + performing the action. 
+ + :param exc: exception to raise + + Class of the exception to raise if the check fails. Any remaining + arguments passed to enforce() (both positional and keyword arguments) + will be passed to the exception class. If exc is not provided, returns + False. + + :return: True if the policy allows the action + :return: False if the policy does not allow the action and exc is not set + """ + global _BRAIN + if not _BRAIN: + _BRAIN = Brain() + if not _BRAIN.check(match_list, target_dict, credentials_dict): + if exc: + raise exc(*args, **kwargs) + return False + return True + + +class Brain(object): + """Implements policy checking.""" + + _checks = {} + + @classmethod + def _register(cls, name, func): + cls._checks[name] = func + + @classmethod + def load_json(cls, data, default_rule=None): + """Init a brain using json instead of a rules dictionary.""" + rules_dict = jsonutils.loads(data) + return cls(rules=rules_dict, default_rule=default_rule) + + def __init__(self, rules=None, default_rule=None): + if self.__class__ != Brain: + LOG.warning(_("Inheritance-based rules are deprecated; use " + "the default brain instead of %s.") % + self.__class__.__name__) + + self.rules = rules or {} + self.default_rule = default_rule + + def add_rule(self, key, match): + self.rules[key] = match + + def _check(self, match, target_dict, cred_dict): + try: + match_kind, match_value = match.split(':', 1) + except Exception: + LOG.exception(_("Failed to understand rule %(match)r") % locals()) + # If the rule is invalid, fail closed + return False + + func = None + try: + old_func = getattr(self, '_check_%s' % match_kind) + except AttributeError: + func = self._checks.get(match_kind, self._checks.get(None, None)) + else: + LOG.warning(_("Inheritance-based rules are deprecated; update " + "_check_%s") % match_kind) + func = lambda brain, kind, value, target, cred: old_func(value, + target, + cred) + + if not func: + LOG.error(_("No handler for matches of kind %s") % match_kind) + # Fail 
closed + return False + + return func(self, match_kind, match_value, target_dict, cred_dict) + + def check(self, match_list, target_dict, cred_dict): + """Checks authorization of some rules against credentials. + + Detailed description of the check with examples in policy.enforce(). + + :param match_list: nested tuples of data to match against + :param target_dict: dict of object properties + :param credentials_dict: dict of actor properties + + :returns: True if the check passes + + """ + if not match_list: + return True + for and_list in match_list: + if isinstance(and_list, basestring): + and_list = (and_list,) + if all([self._check(item, target_dict, cred_dict) + for item in and_list]): + return True + return False + + +class HttpBrain(Brain): + """A brain that can check external urls for policy. + + Posts json blobs for target and credentials. + + Note that this brain is deprecated; the http check is registered + by default. + """ + + pass + + +def register(name, func=None): + """ + Register a function as a policy check. + + :param name: Gives the name of the check type, e.g., 'rule', + 'role', etc. If name is None, a default function + will be registered. + :param func: If given, provides the function to register. If not + given, returns a function taking one argument to + specify the function to register, allowing use as a + decorator. + """ + + # Perform the actual decoration by registering the function. + # Returns the function for compliance with the decorator + # interface. 
+ def decorator(func): + # Register the function + Brain._register(name, func) + return func + + # If the function is given, do the registration + if func: + return decorator(func) + + return decorator + + +@register("rule") +def _check_rule(brain, match_kind, match, target_dict, cred_dict): + """Recursively checks credentials based on the brains rules.""" + try: + new_match_list = brain.rules[match] + except KeyError: + if brain.default_rule and match != brain.default_rule: + new_match_list = ('rule:%s' % brain.default_rule,) + else: + return False + + return brain.check(new_match_list, target_dict, cred_dict) + + +@register("role") +def _check_role(brain, match_kind, match, target_dict, cred_dict): + """Check that there is a matching role in the cred dict.""" + return match.lower() in [x.lower() for x in cred_dict['roles']] + + +@register('http') +def _check_http(brain, match_kind, match, target_dict, cred_dict): + """Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response is + exactly 'True'. A custom brain using response codes could easily + be implemented. + + """ + url = 'http:' + (match % target_dict) + data = {'target': jsonutils.dumps(target_dict), + 'credentials': jsonutils.dumps(cred_dict)} + post_data = urllib.urlencode(data) + f = urllib2.urlopen(url, post_data) + return f.read() == "True" + + +@register(None) +def _check_generic(brain, match_kind, match, target_dict, cred_dict): + """Check an individual match. 
class UnknownArgumentError(Exception):
    """Raised when execute() receives unsupported keyword arguments."""

    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)


class ProcessExecutionError(Exception):
    """Raised when a spawned command exits with an unexpected status.

    The raw stdout/stderr/exit_code/cmd are kept as attributes exactly as
    passed in; fallbacks are applied only when building the message.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        if description is None:
            description = "Unexpected error while running command."
        if exit_code is None:
            exit_code = '-'
        message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
                   % (description, cmd, exit_code, stdout, stderr))
        super(ProcessExecutionError, self).__init__(message)


class NoRootWrapSpecified(Exception):
    """Raised when run_as_root is requested without a root_helper."""

    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)


def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not
    # what non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def execute(*cmd, **kwargs):
    """
    Helper method to shell out and execute a command through subprocess with
    optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)

    # Normalize check_exit_code to a list of allowed codes; a bare
    # boolean means "ignore the exit code entirely" when False.
    ignore_exit_code = False
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if len(kwargs):
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)

    if run_as_root and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=('Command requested root, but did not specify a root '
                         'helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    cmd = [str(part) for part in cmd]

    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                # Windows supports neither preexec_fn nor close_fds.
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell)
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            LOG.debug(_('%r failed. Retrying.'), cmd)
            if delay_on_retry:
                greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not
    # what non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def _exit_error(execname, message, errorcode, log=True):
    """Report an error on stdout (and optionally the log), then exit."""
    print("%s: %s" % (execname, message))
    if log:
        logging.error(message)
    sys.exit(errorcode)


def main():
    """Entry point for cinder-rootwrap.

    Expects ``argv = [execname, configfile, command, args...]``; loads the
    rootwrap configuration, matches the requested command against the
    configured filters, and executes it only when a filter authorizes it.
    Exits with one of the RC_* codes on failure.
    """
    # Split arguments, require at least a command
    execname = sys.argv.pop(0)
    if len(sys.argv) < 2:
        _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)

    configfile = sys.argv.pop(0)
    userargs = sys.argv[:]

    # Add ../ to sys.path to allow running from branch
    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
                                                    os.pardir, os.pardir))
    if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
        sys.path.insert(0, possible_topdir)

    from cinder.openstack.common.rootwrap import wrapper

    # Load configuration
    try:
        rawconfig = ConfigParser.RawConfigParser()
        rawconfig.read(configfile)
        config = wrapper.RootwrapConfig(rawconfig)
    except ValueError as exc:
        msg = "Incorrect value in %s: %s" % (configfile, exc.message)
        _exit_error(execname, msg, RC_BADCONFIG, log=False)
    except ConfigParser.Error:
        _exit_error(execname, "Incorrect configuration file: %s" % configfile,
                    RC_BADCONFIG, log=False)

    if config.use_syslog:
        wrapper.setup_syslog(execname,
                             config.syslog_log_facility,
                             config.syslog_log_level)

    # Execute command if it matches any of the loaded filters
    filters = wrapper.load_filters(config.filters_path)
    try:
        filtermatch = wrapper.match_filter(filters, userargs,
                                           exec_dirs=config.exec_dirs)
        if filtermatch:
            command = filtermatch.get_command(userargs,
                                              exec_dirs=config.exec_dirs)
            if config.use_syslog:
                logging.info("(%s > %s) Executing %s (filter match = %s)" % (
                    os.getlogin(), pwd.getpwuid(os.getuid())[0],
                    command, filtermatch.name))

            # Run the authorized command with our own stdio and the
            # filter-provided environment, then propagate its exit code.
            obj = subprocess.Popen(command,
                                   stdin=sys.stdin,
                                   stdout=sys.stdout,
                                   stderr=sys.stderr,
                                   preexec_fn=_subprocess_setup,
                                   env=filtermatch.get_environment(userargs))
            obj.wait()
            sys.exit(obj.returncode)

    except wrapper.FilterMatchNotExecutable as exc:
        msg = ("Executable not found: %s (filter match = %s)"
               % (exc.match.exec_path, exc.match.name))
        _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)

    except wrapper.NoFilterMatched:
        msg = ("Unauthorized command: %s (no filter matched)"
               % ' '.join(userargs))
        _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)

import os
import re


class CommandFilter(object):
    """Command filter only checking that the 1st argument matches exec_path"""

    def __init__(self, exec_path, run_as, *args):
        # name is filled in later by wrapper.load_filters from the config key.
        self.name = ''
        self.exec_path = exec_path
        self.run_as = run_as
        self.args = args
        # Cached result of get_exec(); None means "not resolved yet",
        # "" means "resolved, nothing executable found".
        self.real_exec = None

    def get_exec(self, exec_dirs=[]):
        """Returns existing executable, or empty string if none found"""
        if self.real_exec is not None:
            return self.real_exec
        self.real_exec = ""
        if self.exec_path.startswith('/'):
            # Absolute path: must itself be executable.
            if os.access(self.exec_path, os.X_OK):
                self.real_exec = self.exec_path
        else:
            # Relative name: first executable hit in exec_dirs wins.
            for binary_path in exec_dirs:
                expanded_path = os.path.join(binary_path, self.exec_path)
                if os.access(expanded_path, os.X_OK):
                    self.real_exec = expanded_path
                    break
        return self.real_exec

    def match(self, userargs):
        """Only check that the first argument (command) matches exec_path"""
        if (os.path.basename(self.exec_path) == userargs[0]):
            return True
        return False

    def get_command(self, userargs, exec_dirs=[]):
        """Returns command to execute (with sudo -u if run_as != root)."""
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        if (self.run_as != 'root'):
            # Used to run commands at lesser privileges
            return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
        return [to_exec] + userargs[1:]

    def get_environment(self, userargs):
        """Returns specific environment to set, None if none"""
        return None


class RegExpFilter(CommandFilter):
    """Command filter doing regexp matching for every argument"""

    def match(self, userargs):
        # Early skip if command or number of args don't match
        if (len(self.args) != len(userargs)):
            # DENY: argument numbers don't match
            return False
        # Compare each arg (anchoring pattern explicitly at end of string)
        for (pattern, arg) in zip(self.args, userargs):
            try:
                if not re.match(pattern + '$', arg):
                    break
            except re.error:
                # DENY: Badly-formed filter
                return False
        else:
            # ALLOW: All arguments matched
            return True

        # DENY: Some arguments did not match
        return False


class PathFilter(CommandFilter):
    """Command filter checking that path arguments are within given dirs

    One can specify the following constraints for command arguments:
        1) pass     - pass an argument as is to the resulting command
        2) some_str - check if an argument is equal to the given string
        3) abs path - check if a path argument is within the given base dir

    A typical rootwrapper filter entry looks like this:
        # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
        chown: PathFilter, /bin/chown, root, nova, /var/lib/images

    """

    def match(self, userargs):
        command, arguments = userargs[0], userargs[1:]

        equal_args_num = len(self.args) == len(arguments)
        exec_is_valid = super(PathFilter, self).match(userargs)
        args_equal_or_pass = all(
            arg == 'pass' or arg == value
            for arg, value in zip(self.args, arguments)
            if not os.path.isabs(arg)  # arguments not specifying abs paths
        )
        # realpath() resolves symlinks, so a symlink pointing outside the
        # base dir is rejected by the commonprefix check below.
        paths_are_within_base_dirs = all(
            os.path.commonprefix([arg, os.path.realpath(value)]) == arg
            for arg, value in zip(self.args, arguments)
            if os.path.isabs(arg)  # arguments specifying abs paths
        )

        return (equal_args_num and
                exec_is_valid and
                args_equal_or_pass and
                paths_are_within_base_dirs)

    def get_command(self, userargs, exec_dirs=[]):
        command, arguments = userargs[0], userargs[1:]

        # convert path values to canonical ones; copy other args as is
        args = [os.path.realpath(value) if os.path.isabs(arg) else value
                for arg, value in zip(self.args, arguments)]

        return super(PathFilter, self).get_command([command] + args,
                                                   exec_dirs)


class DnsmasqFilter(CommandFilter):
    """Specific filter for the dnsmasq call (which includes env)"""

    CONFIG_FILE_ARG = 'CONFIG_FILE'

    def match(self, userargs):
        # NOTE(review): indexes userargs[1..3] without a length check, so a
        # shorter command line raises IndexError out of match_filter instead
        # of being treated as a non-match -- TODO confirm and guard.
        if (userargs[0] == 'env' and
                userargs[1].startswith(self.CONFIG_FILE_ARG) and
                userargs[2].startswith('NETWORK_ID=') and
                userargs[3] == 'dnsmasq'):
            return True
        return False

    def get_command(self, userargs, exec_dirs=[]):
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        dnsmasq_pos = userargs.index('dnsmasq')
        return [to_exec] + userargs[dnsmasq_pos + 1:]

    def get_environment(self, userargs):
        # Carry only the two whitelisted variables through to the command.
        env = os.environ.copy()
        env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
        env['NETWORK_ID'] = userargs[2].split('=')[-1]
        return env


class DeprecatedDnsmasqFilter(DnsmasqFilter):
    """Variant of dnsmasq filter to support old-style FLAGFILE"""
    CONFIG_FILE_ARG = 'FLAGFILE'


class KillFilter(CommandFilter):
    """Specific filter for the kill calls.
       1st argument is the user to run /bin/kill under
       2nd argument is the location of the affected executable
       Subsequent arguments list the accepted signals (if any)

       This filter relies on /proc to accurately determine affected
       executable, so it will only work on procfs-capable systems (not OSX).
    """

    def __init__(self, *args):
        super(KillFilter, self).__init__("/bin/kill", *args)

    def match(self, userargs):
        if userargs[0] != "kill":
            return False
        args = list(userargs)
        if len(args) == 3:
            # A specific signal is requested
            signal = args.pop(1)
            if signal not in self.args[1:]:
                # Requested signal not in accepted list
                return False
        else:
            if len(args) != 2:
                # Incorrect number of arguments
                return False
            if len(self.args) > 1:
                # No signal requested, but filter requires specific signal
                return False
        try:
            command = os.readlink("/proc/%d/exe" % int(args[1]))
            # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
            # the end if an executable is updated or deleted
            if command.endswith(" (deleted)"):
                command = command[:command.rindex(" ")]
            if command != self.args[0]:
                # Affected executable does not match
                return False
        except (ValueError, OSError):
            # Incorrect PID
            return False
        return True


class ReadFileFilter(CommandFilter):
    """Specific filter for the utils.read_file_as_root call"""

    def __init__(self, file_path, *args):
        self.file_path = file_path
        super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)

    def match(self, userargs):
        # NOTE(review): userargs[1] is read before the length check below, so
        # a bare ['cat'] raises IndexError rather than returning False --
        # TODO confirm and reorder the checks.
        if userargs[0] != 'cat':
            return False
        if userargs[1] != self.file_path:
            return False
        if len(userargs) != 2:
            return False
        return True
diff --git a/cinder/openstack/common/rootwrap/wrapper.py b/cinder/openstack/common/rootwrap/wrapper.py
new file mode 100644
index 0000000000..a8ab123905
--- /dev/null
+++ b/cinder/openstack/common/rootwrap/wrapper.py
@@ -0,0 +1,149 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import ConfigParser
import logging
import logging.handlers
import os
import string

from cinder.openstack.common.rootwrap import filters


class NoFilterMatched(Exception):
    """This exception is raised when no filter matched."""
    pass


class FilterMatchNotExecutable(Exception):
    """
    This exception is raised when a filter matched but no executable was
    found.
    """
    def __init__(self, match=None, **kwargs):
        # NOTE(review): extra kwargs are silently discarded and
        # Exception.__init__ is never called -- TODO confirm intended.
        self.match = match


class RootwrapConfig(object):
    """Parsed view of rootwrap.conf; raises ValueError on bad values."""

    def __init__(self, config):
        # filters_path
        self.filters_path = config.get("DEFAULT", "filters_path").split(",")

        # exec_dirs
        if config.has_option("DEFAULT", "exec_dirs"):
            self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
        else:
            # Use system PATH if exec_dirs is not specified
            self.exec_dirs = os.environ["PATH"].split(':')

        # syslog_log_facility: accept either the SysLogHandler attribute
        # name (e.g. LOG_SYSLOG) or a facility_names key (e.g. 'syslog').
        if config.has_option("DEFAULT", "syslog_log_facility"):
            v = config.get("DEFAULT", "syslog_log_facility")
            facility_names = logging.handlers.SysLogHandler.facility_names
            self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
                                               v, None)
            if self.syslog_log_facility is None and v in facility_names:
                self.syslog_log_facility = facility_names.get(v)
            if self.syslog_log_facility is None:
                raise ValueError('Unexpected syslog_log_facility: %s' % v)
        else:
            default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
            self.syslog_log_facility = default_facility

        # syslog_log_level: getLevelName returns "Level X" for unknown
        # names, which is how the invalid case is detected below.
        # NOTE(review): 'Unexepected' typo in the runtime error string --
        # left as-is here since it is program output.
        if config.has_option("DEFAULT", "syslog_log_level"):
            v = config.get("DEFAULT", "syslog_log_level")
            self.syslog_log_level = logging.getLevelName(v.upper())
            if (self.syslog_log_level == "Level %s" % v.upper()):
                raise ValueError('Unexepected syslog_log_level: %s' % v)
        else:
            self.syslog_log_level = logging.ERROR

        # use_syslog
        if config.has_option("DEFAULT", "use_syslog"):
            self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
        else:
            self.use_syslog = False


def setup_syslog(execname, facility, level):
    """Attach a /dev/log syslog handler to the root logger."""
    rootwrap_logger = logging.getLogger()
    rootwrap_logger.setLevel(level)
    handler = logging.handlers.SysLogHandler(address='/dev/log',
                                             facility=facility)
    handler.setFormatter(logging.Formatter(
        os.path.basename(execname) + ': %(message)s'))
    rootwrap_logger.addHandler(handler)


def build_filter(class_name, *args):
    """Returns a filter object of class class_name"""
    if not hasattr(filters, class_name):
        logging.warning("Skipping unknown filter class (%s) specified "
                        "in filter definitions" % class_name)
        return None
    filterclass = getattr(filters, class_name)
    return filterclass(*args)


def load_filters(filters_path):
    """Load filters from a list of directories"""
    filterlist = []
    for filterdir in filters_path:
        if not os.path.isdir(filterdir):
            continue
        for filterfile in os.listdir(filterdir):
            filterconfig = ConfigParser.RawConfigParser()
            # NOTE(review): read() silently ignores unreadable files, and a
            # file without a [Filters] section will raise NoSectionError
            # below -- TODO confirm both are acceptable.
            filterconfig.read(os.path.join(filterdir, filterfile))
            for (name, value) in filterconfig.items("Filters"):
                filterdefinition = [string.strip(s) for s in value.split(',')]
                newfilter = build_filter(*filterdefinition)
                if newfilter is None:
                    continue
                newfilter.name = name
                filterlist.append(newfilter)
    return filterlist


def match_filter(filters, userargs, exec_dirs=[]):
    """
    Checks user command and arguments through command filters and
    returns the first matching filter.
    Raises NoFilterMatched if no filter matched.
    Raises FilterMatchNotExecutable if no executable was found for the
    best filter match.
    """
    first_not_executable_filter = None

    for f in filters:
        if f.match(userargs):
            # Try other filters if executable is absent
            if not f.get_exec(exec_dirs=exec_dirs):
                if not first_not_executable_filter:
                    first_not_executable_filter = f
                continue
            # Otherwise return matching filter for execution
            return f

    if first_not_executable_filter:
        # A filter matched, but no executable was found for it
        raise FilterMatchNotExecutable(match=first_not_executable_filter)

    # No filter matched
    raise NoFilterMatched()
diff --git a/cinder/openstack/common/rpc/__init__.py b/cinder/openstack/common/rpc/__init__.py
new file mode 100644
index 0000000000..3ffce83325
--- /dev/null
+++ b/cinder/openstack/common/rpc/__init__.py
@@ -0,0 +1,307 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A remote procedure call (rpc) abstraction.

For some wrappers that add message versioning to rpc, see:
    rpc.dispatcher
    rpc.proxy
"""

import inspect
import logging

from oslo.config import cfg

from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import importutils
from cinder.openstack.common import local


LOG = logging.getLogger(__name__)


rpc_opts = [
    cfg.StrOpt('rpc_backend',
               default='%s.impl_kombu' % __package__,
               help="The messaging module to use, defaults to kombu."),
    cfg.IntOpt('rpc_thread_pool_size',
               default=64,
               help='Size of RPC thread pool'),
    cfg.IntOpt('rpc_conn_pool_size',
               default=30,
               help='Size of RPC connection pool'),
    cfg.IntOpt('rpc_response_timeout',
               default=60,
               help='Seconds to wait for a response from call or multicall'),
    cfg.IntOpt('rpc_cast_timeout',
               default=30,
               help='Seconds to wait before a cast expires (TTL). '
                    'Only supported by impl_zmq.'),
    cfg.ListOpt('allowed_rpc_exception_modules',
                default=['cinder.openstack.common.exception',
                         'nova.exception',
                         'cinder.exception',
                         'exceptions',
                         ],
                # NOTE(review): implicit string concatenation yields
                # "recreatedupon" (missing space) in the rendered help text;
                # left untouched here as it is a runtime string.
                help='Modules of exceptions that are permitted to be recreated'
                     'upon receiving exception data from an rpc call.'),
    cfg.BoolOpt('fake_rabbit',
                default=False,
                help='If passed, use a fake RabbitMQ provider'),
    cfg.StrOpt('control_exchange',
               default='openstack',
               help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]

CONF = cfg.CONF
CONF.register_opts(rpc_opts)


def set_defaults(control_exchange):
    """Override the default control_exchange for this process."""
    cfg.set_defaults(rpc_opts,
                     control_exchange=control_exchange)


def create_connection(new=True):
    """Create a connection to the message bus used for rpc.

    For some example usage of creating a connection and some consumers on that
    connection, see nova.service.

    :param new: Whether or not to create a new connection.  A new connection
                will be created by default.  If new is False, the
                implementation is free to return an existing connection from a
                pool.

    :returns: An instance of openstack.common.rpc.common.Connection
    """
    return _get_impl().create_connection(CONF, new=new)


def _check_for_lock():
    # Debug-only sanity check: warn when an RPC is issued while holding a
    # lock (a likely deadlock source).  Returns None when debug is off,
    # otherwise True/False for "lock held".
    if not CONF.debug:
        return None

    if ((hasattr(local.strong_store, 'locks_held')
         and local.strong_store.locks_held)):
        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(_('A RPC is being made while holding a lock. The locks '
                   'currently held are %(locks)s. This is probably a bug. '
                   'Please report it. Include the following: [%(stack)s].'),
                 {'locks': local.strong_store.locks_held,
                  'stack': stack})
        return True

    return False


def call(context, topic, msg, timeout=None, check_for_lock=False):
    """Invoke a remote method that returns something.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if a RPC call is made
                    with a lock held.

    :returns: A dict from the remote method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    if check_for_lock:
        _check_for_lock()
    return _get_impl().call(CONF, context, topic, msg, timeout)


def cast(context, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast(CONF, context, topic, msg)


def fanout_cast(context, topic, msg):
    """Broadcast a remote method invocation with no return.

    This method will get invoked on all consumers that were set up with this
    topic name and fanout=True.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=True.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast(CONF, context, topic, msg)


def multicall(context, topic, msg, timeout=None, check_for_lock=False):
    """Invoke a remote method and get back an iterator.

    In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as the come in via
    an iterator.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if a RPC call is made
                    with a lock held.

    :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
              an index that starts at 0 and increases by one for each value
              returned and X is the Nth value that was returned by the remote
              method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    if check_for_lock:
        _check_for_lock()
    return _get_impl().multicall(CONF, context, topic, msg, timeout)


def notify(context, topic, msg, envelope=False):
    """Send notification event.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the notification to.
    :param msg: This is a dict of content of event.
    :param envelope: Set to True to enable message envelope for notifications.

    :returns: None
    """
    # NOTE(review): uses cfg.CONF where siblings use the module-level CONF
    # alias; same object, inconsistent spelling only.
    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)


def cleanup():
    """Clean up resources in use by implementation.

    Clean up any resources that have been allocated by the RPC implementation.
    This is typically open connections to a messaging service.  This function
    would get called before an application using this API exits to allow
    connections to get torn down cleanly.

    :returns: None
    """
    return _get_impl().cleanup()


def cast_to_server(context, server_params, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast_to_server(CONF, context, server_params, topic,
                                      msg)


def fanout_cast_to_server(context, server_params, topic, msg):
    """Broadcast to a remote method invocation with no return.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
                                             topic, msg)


def queue_get_for(context, topic, host):
    """Get a queue name for a given topic + host.

    This function only works if this naming convention is followed on the
    consumer side, as well.  For example, in nova, every instance of the
    nova-foo service calls create_consumer() for two topics:

        foo
        foo.<host>

    Messages sent to the 'foo' topic are distributed to exactly one instance of
    the nova-foo service.  The services are chosen in a round-robin fashion.
    Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
    <host>.
    """
    return '%s.%s' % (topic, host) if host else topic


_RPCIMPL = None


def _get_impl():
    """Delay import of rpc_backend until configuration is loaded."""
    global _RPCIMPL
    if _RPCIMPL is None:
        try:
            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
        except ImportError:
            # For backwards compatibility with older nova config.
            impl = CONF.rpc_backend.replace('nova.rpc',
                                            'nova.openstack.common.rpc')
            _RPCIMPL = importutils.import_module(impl)
    return _RPCIMPL
diff --git a/cinder/openstack/common/rpc/amqp.py b/cinder/openstack/common/rpc/amqp.py
new file mode 100644
index 0000000000..9addfa1c76
--- /dev/null
+++ b/cinder/openstack/common/rpc/amqp.py
@@ -0,0 +1,677 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Shared code between AMQP based openstack.common.rpc implementations.

The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid.  impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""

import collections
import inspect
import sys
import uuid

from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg

from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import local
from cinder.openstack.common import log as logging
from cinder.openstack.common.rpc import common as rpc_common


# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
    cfg.BoolOpt('amqp_rpc_single_reply_queue',
                default=False,
                help='Enable a fast single reply queue if using AMQP based '
                     'RPC like RabbitMQ or Qpid.'),
]

cfg.CONF.register_opts(amqp_opts)

UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)


class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""
    def __init__(self, conf, connection_cls, *args, **kwargs):
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)
        self.reply_proxy = None

    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        LOG.debug(_('Pool creating new connection'))
        return self.connection_cls(self.conf)

    def empty(self):
        """Close all cached connections and drop the class-level pool."""
        while self.free_items:
            self.get().close()
        # Force a new connection pool to be created.
        # Note that this was added due to failing unit test cases. The issue
        # is the above "while loop" gets all the cached connections from the
        # pool and closes them, but never returns them to the pool, a pool
        # leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the tearDown() method. In the run
        # time code, it gets here via cleanup() and only appears in service.py
        # just before doing a sys.exit(), so cleanup() only happens once and
        # the leakage is not a problem.
        self.connection_cls.pool = None


_pool_create_sem = semaphore.Semaphore()


def get_connection_pool(conf, connection_cls):
    """Return (lazily creating) the shared pool for connection_cls."""
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool


class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection().  This is essentially a wrapper around
    Connection that supports 'with'.  It can also return a new
    Connection, or one from a pool.  The function will also catch
    when an instance of this class is to be deleted.  With that
    we can return Connections to the pool on exceptions and so
    forth without making the caller be responsible for catching
    them.  If possible the function makes sure to return a
    connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    # Best-effort close of an unpooled connection.
                    pass
            self.connection = None

    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement.  We're done here."""
        self._done()

    def __del__(self):
        """Caller is done with this connection.  Make sure we cleaned up."""
        self._done()

    def close(self):
        """Caller is done with this connection."""
        self._done()

    def create_consumer(self, topic, proxy, fanout=False):
        self.connection.create_consumer(topic, proxy, fanout)

    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name)

    def consume_in_thread(self):
        self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance"""
        if self.connection:
            return getattr(self.connection, key)
        else:
            raise rpc_common.InvalidRPCConnectionReuse()


class ReplyProxy(ConnectionContext):
    """ Connection class for RPC replies / callbacks """
    def __init__(self, conf, connection_pool):
        # Maps outstanding msg_id -> waiter queue for in-flight calls.
        self._call_waiters = {}
        self._num_call_waiters = 0
        self._num_call_waiters_wrn_threshhold = 10
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()

    def _process_data(self, message_data):
        # Route an incoming reply to the waiter registered for its msg_id.
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            LOG.warn(_('no calling threads waiting for msg_id : %s'
                       ', message : %s') % (msg_id, message_data))
        else:
            waiter.put(message_data)

    def add_call_waiter(self, waiter, msg_id):
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshhold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
            # Double the threshold so the warning is not spammed every call.
            self._num_call_waiters_wrn_threshhold *= 2
        self._call_waiters[msg_id] = waiter

    def del_call_waiter(self, msg_id):
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]

    def get_reply_q(self):
        return self._reply_q


def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)

        try:
            msg = {'result': reply, 'failure': failure}
        except TypeError:
            # NOTE(review): constructing a dict literal cannot raise
            # TypeError, so this fallback (repr-ing non-serializable reply
            # attributes) looks unreachable as written -- TODO confirm.
            msg = {'result': dict((k, repr(v))
                   for k, v in reply.__dict__.iteritems()),
                   'failure': failure}
        if ending:
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
        if reply_q:
            msg['_msg_id'] = msg_id
            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call"""
    def __init__(self, **kwargs):
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['conf'] = self.conf
        values['msg_id'] = self.msg_id
        values['reply_q'] = self.reply_q
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                      reply, failure, ending, log_failure)
            if ending:
                # Final message for this call; further replies are dropped.
                self.msg_id = None


def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['reply_q'] = msg.pop('_reply_q', None)
    context_dict['conf'] = conf
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx


def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.

    """
    context_d = dict([('_context_%s' % key, value)
                      for (key, value) in context.to_dict().iteritems()])
    msg.update(context_d)


class _MsgIdCache(object):
    """This class checks any duplicate messages."""

    # NOTE: This value is considered can be a configuration item, but
    #       it is not necessary to change its value in most cases,
    #       so let this value as static for now.
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        # Bounded FIFO of recently-seen unique ids.
        self.prev_msgids = collections.deque([],
                                             maxlen=self.DUP_MSG_CHECK_SIZE)

    def check_duplicate_message(self, message_data):
        """AMQP consumers may read same message twice when exceptions occur
           before ack is returned. This method prevents doing it.
        """
        if UNIQUE_ID in message_data:
            msg_id = message_data[UNIQUE_ID]
            if msg_id not in self.prev_msgids:
                self.prev_msgids.append(msg_id)
            else:
                raise rpc_common.DuplicateMessageError(msg_id=msg_id)


def _add_unique_id(msg):
    """Add unique_id for checking duplicate messages."""
    unique_id = uuid.uuid4().hex
    msg.update({UNIQUE_ID: unique_id})
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))


class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager used by
    the Connection class to start up green threads
    to handle incoming messages.
    """

    def __init__(self, conf, connection_pool):
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
        self.connection_pool = connection_pool
        self.conf = conf

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()


class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback to allow it to be invoked in a green
    thread.
    """

    def __init__(self, conf, callback, connection_pool):
        """
        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        """
        super(CallbackWrapper, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.callback = callback

    def __call__(self, message_data):
        self.pool.spawn_n(self.callback, message_data)


class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.proxy = proxy
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)

    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
+ exc_info = sys.exc_info() + LOG.error(_('Exception during message handling'), + exc_info=exc_info) + ctxt.reply(None, exc_info, connection_pool=self.connection_pool) + + +class MulticallProxyWaiter(object): + def __init__(self, conf, msg_id, timeout, connection_pool): + self._msg_id = msg_id + self._timeout = timeout or conf.rpc_response_timeout + self._reply_proxy = connection_pool.reply_proxy + self._done = False + self._got_ending = False + self._conf = conf + self._dataqueue = queue.LightQueue() + # Add this caller to the reply proxy's call_waiters + self._reply_proxy.add_call_waiter(self, self._msg_id) + self.msg_id_cache = _MsgIdCache() + + def put(self, data): + self._dataqueue.put(data) + + def done(self): + if self._done: + return + self._done = True + # Remove this caller from reply proxy's call_waiters + self._reply_proxy.del_call_waiter(self._msg_id) + + def _process_data(self, data): + result = None + self.msg_id_cache.check_duplicate_message(data) + if data['failure']: + failure = data['failure'] + result = rpc_common.deserialize_remote_exception(self._conf, + failure) + elif data.get('ending', False): + self._got_ending = True + else: + result = data['result'] + return result + + def __iter__(self): + """Return a result until we get a reply with an 'ending" flag""" + if self._done: + raise StopIteration + while True: + try: + data = self._dataqueue.get(timeout=self._timeout) + result = self._process_data(data) + except queue.Empty: + self.done() + raise rpc_common.Timeout() + except Exception: + with excutils.save_and_reraise_exception(): + self.done() + if self._got_ending: + self.done() + raise StopIteration + if isinstance(result, Exception): + self.done() + raise result + yield result + + +#TODO(pekowski): Remove MulticallWaiter() in Havana. 
+class MulticallWaiter(object): + def __init__(self, conf, connection, timeout): + self._connection = connection + self._iterator = connection.iterconsume(timeout=timeout or + conf.rpc_response_timeout) + self._result = None + self._done = False + self._got_ending = False + self._conf = conf + self.msg_id_cache = _MsgIdCache() + + def done(self): + if self._done: + return + self._done = True + self._iterator.close() + self._iterator = None + self._connection.close() + + def __call__(self, data): + """The consume() callback will call this. Store the result.""" + self.msg_id_cache.check_duplicate_message(data) + if data['failure']: + failure = data['failure'] + self._result = rpc_common.deserialize_remote_exception(self._conf, + failure) + + elif data.get('ending', False): + self._got_ending = True + else: + self._result = data['result'] + + def __iter__(self): + """Return a result until we get a 'None' response from consumer""" + if self._done: + raise StopIteration + while True: + try: + self._iterator.next() + except Exception: + with excutils.save_and_reraise_exception(): + self.done() + if self._got_ending: + self.done() + raise StopIteration + result = self._result + if isinstance(result, Exception): + self.done() + raise result + yield result + + +def create_connection(conf, new, connection_pool): + """Create a connection""" + return ConnectionContext(conf, connection_pool, pooled=not new) + + +_reply_proxy_create_sem = semaphore.Semaphore() + + +def multicall(conf, context, topic, msg, timeout, connection_pool): + """Make a call that returns multiple times.""" + # TODO(pekowski): Remove all these comments in Havana. + # For amqp_rpc_single_reply_queue = False, + # Can't use 'with' for multicall, as it returns an iterator + # that will continue to use the connection. 
When it's done, + # connection.close() will get called which will put it back into + # the pool + # For amqp_rpc_single_reply_queue = True, + # The 'with' statement is mandatory for closing the connection + LOG.debug(_('Making synchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + _add_unique_id(msg) + pack_context(msg, context) + + # TODO(pekowski): Remove this flag and the code under the if clause + # in Havana. + if not conf.amqp_rpc_single_reply_queue: + conn = ConnectionContext(conf, connection_pool) + wait_msg = MulticallWaiter(conf, conn, timeout) + conn.declare_direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) + else: + with _reply_proxy_create_sem: + if not connection_pool.reply_proxy: + connection_pool.reply_proxy = ReplyProxy(conf, connection_pool) + msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()}) + wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool) + with ConnectionContext(conf, connection_pool) as conn: + conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) + return wait_msg + + +def call(conf, context, topic, msg, timeout, connection_pool): + """Sends a message on a topic and wait for a response.""" + rv = multicall(conf, context, topic, msg, timeout, connection_pool) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(conf, context, topic, msg, connection_pool): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + _add_unique_id(msg) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.topic_send(topic, rpc_common.serialize_msg(msg)) + + +def fanout_cast(conf, context, topic, msg, connection_pool): + """Sends a message on a fanout exchange without waiting for a response.""" + 
LOG.debug(_('Making asynchronous fanout cast...')) + _add_unique_id(msg) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.fanout_send(topic, rpc_common.serialize_msg(msg)) + + +def cast_to_server(conf, context, server_params, topic, msg, connection_pool): + """Sends a message on a topic to a specific server.""" + _add_unique_id(msg) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool, pooled=False, + server_params=server_params) as conn: + conn.topic_send(topic, rpc_common.serialize_msg(msg)) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg, + connection_pool): + """Sends a message on a fanout exchange to a specific server.""" + _add_unique_id(msg) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool, pooled=False, + server_params=server_params) as conn: + conn.fanout_send(topic, rpc_common.serialize_msg(msg)) + + +def notify(conf, context, topic, msg, connection_pool, envelope): + """Sends a notification event on a topic.""" + LOG.debug(_('Sending %(event_type)s on %(topic)s'), + dict(event_type=msg.get('event_type'), + topic=topic)) + _add_unique_id(msg) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + if envelope: + msg = rpc_common.serialize_msg(msg) + conn.notify_send(topic, msg) + + +def cleanup(connection_pool): + if connection_pool: + connection_pool.empty() + + +def get_control_exchange(conf): + return conf.control_exchange diff --git a/cinder/openstack/common/rpc/common.py b/cinder/openstack/common/rpc/common.py new file mode 100644 index 0000000000..9f0552e5e9 --- /dev/null +++ b/cinder/openstack/common/rpc/common.py @@ -0,0 +1,508 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import sys +import traceback + +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import local +from cinder.openstack.common import log as logging + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +'''RPC Envelope Version. + +This version number applies to the top level structure of messages sent out. +It does *not* apply to the message payload, which must be versioned +independently. For example, when using rpc APIs, a version number is applied +for changes to the API being exposed over rpc. This version number is handled +in the rpc proxy and dispatcher modules. + +This version number applies to the message envelope that is used in the +serialization done inside the rpc layer. See serialize_msg() and +deserialize_msg(). + +The current message format (version 2.0) is very simple. It is: + + { + 'oslo.version': , + 'oslo.message': + } + +Message format version '1.0' is just considered to be the messages we sent +without a message envelope. + +So, the current message envelope just includes the envelope version. It may +eventually contain additional information, such as a signature for the message +payload. + +We will JSON encode the application message payload. 
The message envelope, +which includes the JSON encoded application message body, will be passed down +to the messaging libraries as a dict. +''' +_RPC_ENVELOPE_VERSION = '2.0' + +_VERSION_KEY = 'oslo.version' +_MESSAGE_KEY = 'oslo.message' + + +class RPCException(Exception): + message = _("An unknown RPC related exception occurred.") + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if not message: + try: + message = self.message % kwargs + + except Exception: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + # at least get the core message out if something happened + message = self.message + + super(RPCException, self).__init__(message) + + +class RemoteError(RPCException): + """Signifies that a remote class has raised an exception. + + Contains a string representation of the type of the original exception, + the value of the original exception, and the traceback. These are + sent to the parent as a joined string so printing the exception + contains all of the relevant info. + + """ + message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.") + + def __init__(self, exc_type=None, value=None, traceback=None): + self.exc_type = exc_type + self.value = value + self.traceback = traceback + super(RemoteError, self).__init__(exc_type=exc_type, + value=value, + traceback=traceback) + + +class Timeout(RPCException): + """Signifies that a timeout has occurred. + + This exception is raised if the rpc_response_timeout is reached while + waiting for a response from the remote side. 
+ """ + message = _('Timeout while waiting on RPC response - ' + 'topic: "%(topic)s", RPC method: "%(method)s" ' + 'info: "%(info)s"') + + def __init__(self, info=None, topic=None, method=None): + """ + :param info: Extra info to convey to the user + :param topic: The topic that the rpc call was sent to + :param rpc_method_name: The name of the rpc method being + called + """ + self.info = info + self.topic = topic + self.method = method + super(Timeout, self).__init__( + None, + info=info or _(''), + topic=topic or _(''), + method=method or _('')) + + +class DuplicateMessageError(RPCException): + message = _("Found duplicate message(%(msg_id)s). Skipping it.") + + +class InvalidRPCConnectionReuse(RPCException): + message = _("Invalid reuse of an RPC connection.") + + +class UnsupportedRpcVersion(RPCException): + message = _("Specified RPC version, %(version)s, not supported by " + "this endpoint.") + + +class UnsupportedRpcEnvelopeVersion(RPCException): + message = _("Specified RPC envelope version, %(version)s, " + "not supported by this endpoint.") + + +class Connection(object): + """A connection, returned by rpc.create_connection(). + + This class represents a connection to the message bus used for rpc. + An instance of this class should never be created by users of the rpc API. + Use rpc.create_connection() instead. + """ + def close(self): + """Close the connection. + + This method must be called when the connection will no longer be used. + It will ensure that any resources associated with the connection, such + as a network connection, and cleaned up. + """ + raise NotImplementedError() + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer on this connection. + + A consumer is associated with a message queue on the backend message + bus. The consumer will read messages from the queue, unpack them, and + dispatch them to the proxy object. 
The contents of the message pulled + off of the queue will determine which method gets called on the proxy + object. + + :param topic: This is a name associated with what to consume from. + Multiple instances of a service may consume from the same + topic. For example, all instances of nova-compute consume + from a queue called "compute". In that case, the + messages will get distributed amongst the consumers in a + round-robin fashion if fanout=False. If fanout=True, + every consumer associated with this topic will get a + copy of every message. + :param proxy: The object that will handle all incoming messages. + :param fanout: Whether or not this is a fanout topic. See the + documentation for the topic parameter for some + additional comments on this. + """ + raise NotImplementedError() + + def create_worker(self, topic, proxy, pool_name): + """Create a worker on this connection. + + A worker is like a regular consumer of messages directed to a + topic, except that it is part of a set of such consumers (the + "pool") which may run in parallel. Every pool of workers will + receive a given message, but only one worker in the pool will + be asked to process it. Load is distributed across the members + of the pool in round-robin fashion. + + :param topic: This is a name associated with what to consume from. + Multiple instances of a service may consume from the same + topic. + :param proxy: The object that will handle all incoming messages. + :param pool_name: String containing the name of the pool of workers + """ + raise NotImplementedError() + + def join_consumer_pool(self, callback, pool_name, topic, exchange_name): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. + + :param callback: Callable to be invoked for each message. 
+ :type callback: callable accepting one argument + :param pool_name: The name of the consumer pool. + :type pool_name: str + :param topic: The routing topic for desired messages. + :type topic: str + :param exchange_name: The name of the message exchange where + the client should attach. Defaults to + the configured exchange. + :type exchange_name: str + """ + raise NotImplementedError() + + def consume_in_thread(self): + """Spawn a thread to handle incoming messages. + + Spawn a thread that will be responsible for handling all incoming + messages for consumers that were set up on this connection. + + Message dispatching inside of this is expected to be implemented in a + non-blocking manner. An example implementation would be having this + thread pull messages in for all of the consumers, but utilize a thread + pool for dispatching the messages to the proxy objects. + """ + raise NotImplementedError() + + +def _safe_log(log_func, msg, msg_data): + """Sanitizes the msg_data field before logging.""" + SANITIZE = {'set_admin_password': [('args', 'new_pass')], + 'run_instance': [('args', 'admin_password')], + 'route_message': [('args', 'message', 'args', 'method_info', + 'method_kwargs', 'password'), + ('args', 'message', 'args', 'method_info', + 'method_kwargs', 'admin_password')]} + + has_method = 'method' in msg_data and msg_data['method'] in SANITIZE + has_context_token = '_context_auth_token' in msg_data + has_token = 'auth_token' in msg_data + + if not any([has_method, has_context_token, has_token]): + return log_func(msg, msg_data) + + msg_data = copy.deepcopy(msg_data) + + if has_method: + for arg in SANITIZE.get(msg_data['method'], []): + try: + d = msg_data + for elem in arg[:-1]: + d = d[elem] + d[arg[-1]] = '' + except KeyError, e: + LOG.info(_('Failed to sanitize %(item)s. 
Key error %(err)s'), + {'item': arg, + 'err': e}) + + if has_context_token: + msg_data['_context_auth_token'] = '' + + if has_token: + msg_data['auth_token'] = '' + + return log_func(msg, msg_data) + + +def serialize_remote_exception(failure_info, log_failure=True): + """Prepares exception data to be sent over rpc. + + Failure_info should be a sys.exc_info() tuple. + + """ + tb = traceback.format_exception(*failure_info) + failure = failure_info[1] + if log_failure: + LOG.error(_("Returning exception %s to caller"), unicode(failure)) + LOG.error(tb) + + kwargs = {} + if hasattr(failure, 'kwargs'): + kwargs = failure.kwargs + + data = { + 'class': str(failure.__class__.__name__), + 'module': str(failure.__class__.__module__), + 'message': unicode(failure), + 'tb': tb, + 'args': failure.args, + 'kwargs': kwargs + } + + json_data = jsonutils.dumps(data) + + return json_data + + +def deserialize_remote_exception(conf, data): + failure = jsonutils.loads(str(data)) + + trace = failure.get('tb', []) + message = failure.get('message', "") + "\n" + "\n".join(trace) + name = failure.get('class') + module = failure.get('module') + + # NOTE(ameade): We DO NOT want to allow just any module to be imported, in + # order to prevent arbitrary code execution. 
+ if module not in conf.allowed_rpc_exception_modules: + return RemoteError(name, failure.get('message'), trace) + + try: + mod = importutils.import_module(module) + klass = getattr(mod, name) + if not issubclass(klass, Exception): + raise TypeError("Can only deserialize Exceptions") + + failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) + except (AttributeError, TypeError, ImportError): + return RemoteError(name, failure.get('message'), trace) + + ex_type = type(failure) + str_override = lambda self: message + new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), + {'__str__': str_override, '__unicode__': str_override}) + try: + # NOTE(ameade): Dynamically create a new exception type and swap it in + # as the new type for the exception. This only works on user defined + # Exceptions and not core python exceptions. This is important because + # we cannot necessarily change an exception message so we must override + # the __str__ method. + failure.__class__ = new_ex_type + except TypeError: + # NOTE(ameade): If a core exception then just add the traceback to the + # first exception argument. + failure.args = (message,) + failure.args[1:] + return failure + + +class CommonRpcContext(object): + def __init__(self, **kwargs): + self.values = kwargs + + def __getattr__(self, key): + try: + return self.values[key] + except KeyError: + raise AttributeError(key) + + def to_dict(self): + return copy.deepcopy(self.values) + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def deepcopy(self): + return self.from_dict(self.to_dict()) + + def update_store(self): + local.store.context = self + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag set.""" + # TODO(russellb) This method is a bit of a nova-ism. It makes + # some assumptions about the data in the request context sent + # across rpc, while the rest of this class does not. 
We could get + # rid of this if we changed the nova code that uses this to + # convert the RpcContext back to its native RequestContext doing + # something like nova.context.RequestContext.from_dict(ctxt.to_dict()) + + context = self.deepcopy() + context.values['is_admin'] = True + + context.values.setdefault('roles', []) + + if 'admin' not in context.values['roles']: + context.values['roles'].append('admin') + + if read_deleted is not None: + context.values['read_deleted'] = read_deleted + + return context + + +class ClientException(Exception): + """This encapsulates some actual exception that is expected to be + hit by an RPC proxy object. Merely instantiating it records the + current exception information, which will be passed back to the + RPC client without exceptional logging.""" + def __init__(self): + self._exc_info = sys.exc_info() + + +def catch_client_exception(exceptions, func, *args, **kwargs): + try: + return func(*args, **kwargs) + except Exception, e: + if type(e) in exceptions: + raise ClientException() + else: + raise + + +def client_exceptions(*exceptions): + """Decorator for manager methods that raise expected exceptions. + Marking a Manager method with this decorator allows the declaration + of expected exceptions that the RPC layer should not consider fatal, + and not log as if they were generated in a real error scenario. Note + that this will cause listed exceptions to be wrapped in a + ClientException, which is used internally by the RPC layer.""" + def outer(func): + def inner(*args, **kwargs): + return catch_client_exception(exceptions, func, *args, **kwargs) + return inner + return outer + + +def version_is_compatible(imp_version, version): + """Determine whether versions are compatible. + + :param imp_version: The version implemented + :param version: The version requested by an incoming message. 
+ """ + version_parts = version.split('.') + imp_version_parts = imp_version.split('.') + if int(version_parts[0]) != int(imp_version_parts[0]): # Major + return False + if int(version_parts[1]) > int(imp_version_parts[1]): # Minor + return False + return True + + +def serialize_msg(raw_msg): + # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more + # information about this format. + msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, + _MESSAGE_KEY: jsonutils.dumps(raw_msg)} + + return msg + + +def deserialize_msg(msg): + # NOTE(russellb): Hang on to your hats, this road is about to + # get a little bumpy. + # + # Robustness Principle: + # "Be strict in what you send, liberal in what you accept." + # + # At this point we have to do a bit of guessing about what it + # is we just received. Here is the set of possibilities: + # + # 1) We received a dict. This could be 2 things: + # + # a) Inspect it to see if it looks like a standard message envelope. + # If so, great! + # + # b) If it doesn't look like a standard message envelope, it could either + # be a notification, or a message from before we added a message + # envelope (referred to as version 1.0). + # Just return the message as-is. + # + # 2) It's any other non-dict type. Just return it and hope for the best. + # This case covers return values from rpc.call() from before message + # envelopes were used. (messages to call a method were always a dict) + + if not isinstance(msg, dict): + # See #2 above. + return msg + + base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) + if not all(map(lambda key: key in msg, base_envelope_keys)): + # See #1.b above. + return msg + + # At this point we think we have the message envelope + # format we were expecting. 
(#1.a above) + + if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]): + raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) + + raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) + + return raw_msg diff --git a/cinder/openstack/common/rpc/dispatcher.py b/cinder/openstack/common/rpc/dispatcher.py new file mode 100644 index 0000000000..85195d4a74 --- /dev/null +++ b/cinder/openstack/common/rpc/dispatcher.py @@ -0,0 +1,153 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Code for rpc message dispatching. + +Messages that come in have a version number associated with them. RPC API +version numbers are in the form: + + Major.Minor + +For a given message with version X.Y, the receiver must be marked as able to +handle messages of version A.B, where: + + A = X + + B >= Y + +The Major version number would be incremented for an almost completely new API. +The Minor version number would be incremented for backwards compatible changes +to an existing API. A backwards compatible change could be something like +adding a new method, adding an argument to an existing method (but not +requiring it), or changing the type for an existing argument (but still +handling the old type as well). + +The conversion over to a versioned API must be done on both the client side and +server side of the API at the same time. 
However, as the code stands today, +there can be both versioned and unversioned APIs implemented in the same code +base. + +EXAMPLES +======== + +Nova was the first project to use versioned rpc APIs. Consider the compute rpc +API as an example. The client side is in nova/compute/rpcapi.py and the server +side is in nova/compute/manager.py. + + +Example 1) Adding a new method. +------------------------------- + +Adding a new method is a backwards compatible change. It should be added to +nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to +X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should +have a specific version specified to indicate the minimum API version that must +be implemented for the method to be supported. For example:: + + def get_host_uptime(self, ctxt, host): + topic = _compute_topic(self.topic, ctxt, host, None) + return self.call(ctxt, self.make_msg('get_host_uptime'), topic, + version='1.1') + +In this case, version '1.1' is the first version that supported the +get_host_uptime() method. + + +Example 2) Adding a new parameter. +---------------------------------- + +Adding a new parameter to an rpc method can be made backwards compatible. The +RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped. +The implementation of the method must not expect the parameter to be present.:: + + def some_remote_method(self, arg1, arg2, newarg=None): + # The code needs to deal with newarg=None for cases + # where an older client sends a message without it. + pass + +On the client side, the same changes should be made as in example 1. The +minimum version that supports the new parameter should be specified. +""" + +from cinder.openstack.common.rpc import common as rpc_common + + +class RpcDispatcher(object): + """Dispatch rpc messages according to the requested API version. + + This class can be used as the top level 'manager' for a service. 
It + contains a list of underlying managers that have an API_VERSION attribute. + """ + + def __init__(self, callbacks): + """Initialize the rpc dispatcher. + + :param callbacks: List of proxy objects that are an instance + of a class with rpc methods exposed. Each proxy + object should have an RPC_API_VERSION attribute. + """ + self.callbacks = callbacks + super(RpcDispatcher, self).__init__() + + def dispatch(self, ctxt, version, method, namespace, **kwargs): + """Dispatch a message based on a requested version. + + :param ctxt: The request context + :param version: The requested API version from the incoming message + :param method: The method requested to be called by the incoming + message. + :param namespace: The namespace for the requested method. If None, + the dispatcher will look for a method on a callback + object with no namespace set. + :param kwargs: A dict of keyword arguments to be passed to the method. + + :returns: Whatever is returned by the underlying method that gets + called. 
+ """ + if not version: + version = '1.0' + + had_compatible = False + for proxyobj in self.callbacks: + # Check for namespace compatibility + try: + cb_namespace = proxyobj.RPC_API_NAMESPACE + except AttributeError: + cb_namespace = None + + if namespace != cb_namespace: + continue + + # Check for version compatibility + try: + rpc_api_version = proxyobj.RPC_API_VERSION + except AttributeError: + rpc_api_version = '1.0' + + is_compatible = rpc_common.version_is_compatible(rpc_api_version, + version) + had_compatible = had_compatible or is_compatible + + if not hasattr(proxyobj, method): + continue + if is_compatible: + return getattr(proxyobj, method)(ctxt, **kwargs) + + if had_compatible: + raise AttributeError("No such RPC function '%s'" % method) + else: + raise rpc_common.UnsupportedRpcVersion(version=version) diff --git a/cinder/openstack/common/rpc/impl_fake.py b/cinder/openstack/common/rpc/impl_fake.py new file mode 100644 index 0000000000..ec7200a7b6 --- /dev/null +++ b/cinder/openstack/common/rpc/impl_fake.py @@ -0,0 +1,195 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Fake RPC implementation which calls proxy methods directly with no +queues. Casts will block, but this is very useful for tests. +""" + +import inspect +# NOTE(russellb): We specifically want to use json, not our own jsonutils. 
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time

import eventlet

from cinder.openstack.common.rpc import common as rpc_common

# Topic -> list of Consumer objects.  Module-global so that every in-process
# Connection shares the same fake "broker".
CONSUMERS = {}


class RpcContext(rpc_common.CommonRpcContext):
    # Request context that additionally accumulates replies produced while a
    # message is dispatched in-process (see Consumer.call below).

    def __init__(self, **kwargs):
        super(RpcContext, self).__init__(**kwargs)
        self._response = []
        self._done = False

    def deepcopy(self):
        # Copy the context values but deliberately share the mutable
        # _response list, so replies made on the copy stay visible here.
        values = self.to_dict()
        new_inst = self.__class__(**values)
        new_inst._response = self._response
        new_inst._done = self._done
        return new_inst

    def reply(self, reply=None, failure=None, ending=False):
        if ending:
            self._done = True
        # Once the reply stream has ended, further payloads -- including the
        # 'ending' call's own payload -- are dropped.
        if not self._done:
            self._response.append((reply, failure))


class Consumer(object):
    # Fake consumer: dispatches messages for one topic directly to a proxy
    # object instead of reading them from a broker.

    def __init__(self, topic, proxy):
        self.topic = topic
        self.proxy = proxy

    def call(self, context, version, method, namespace, args, timeout):
        # Run the dispatch in a greenthread, gather all replies, and return
        # the list of response payloads (raises on remote failure).
        done = eventlet.event.Event()

        def _inner():
            ctxt = RpcContext.from_dict(context.to_dict())
            try:
                # NOTE(review): dispatch is handed the caller's 'context'
                # while replies are read from the local 'ctxt' copy below --
                # this matches the original code, but confirm the asymmetry
                # is intended.
                rval = self.proxy.dispatch(context, version, method,
                                           namespace, **args)
                res = []
                # Caller might have called ctxt.reply() manually
                for (reply, failure) in ctxt._response:
                    if failure:
                        # Python 2 three-argument raise: re-raise the stored
                        # exception with its original traceback.
                        raise failure[0], failure[1], failure[2]
                    res.append(reply)
                # if ending not 'sent'...we might have more data to
                # return from the function itself
                if not ctxt._done:
                    if inspect.isgenerator(rval):
                        for val in rval:
                            res.append(val)
                    else:
                        res.append(rval)
                done.send(res)
            except rpc_common.ClientException as e:
                # Expected ("client") exceptions are wrapped; surface the
                # original exception instance to the caller.
                done.send_exception(e._exc_info[1])
            except Exception as e:
                done.send_exception(e)

        thread = eventlet.greenthread.spawn(_inner)

        if timeout:
            # Poll once per second so the worker greenthread can be killed
            # when the deadline passes.
            start_time = time.time()
            while not done.ready():
                eventlet.greenthread.sleep(1)
                cur_time = time.time()
                if (cur_time - start_time) > timeout:
                    thread.kill()
                    raise rpc_common.Timeout()

        return done.wait()


class Connection(object):
    """Connection object."""

    def __init__(self):
        self.consumers = []

    def create_consumer(self, topic, proxy, fanout=False):
        # 'fanout' is accepted for API compatibility but ignored here: every
        # consumer is simply registered under its topic.
        consumer = Consumer(topic, proxy)
        self.consumers.append(consumer)
        if topic not in CONSUMERS:
            CONSUMERS[topic] = []
        CONSUMERS[topic].append(consumer)

    def close(self):
        # Deregister this connection's consumers from the global registry.
        for consumer in self.consumers:
            CONSUMERS[consumer.topic].remove(consumer)
        self.consumers = []

    def consume_in_thread(self):
        # No-op: in the fake driver messages are dispatched synchronously at
        # send time, so there is nothing to consume in the background.
        pass


def create_connection(conf, new=True):
    """Create a connection"""
    return Connection()


def check_serialize(msg):
    """Make sure a message intended for rpc can be serialized."""
    json.dumps(msg)


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""

    check_serialize(msg)

    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    try:
        consumer = CONSUMERS[topic][0]
    except (KeyError, IndexError):
        # No consumer registered for this topic; mimic an empty reply.
        return iter([None])
    else:
        return consumer.call(context, version, method, namespace, args,
                             timeout)


def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    rv = multicall(conf, context, topic, msg, timeout)
    # NOTE(vish): return the last result from the multicall
    rv = list(rv)
    if not rv:
        return
    return rv[-1]


def cast(conf, context, topic, msg):
    # A fake cast still executes synchronously; any error from the callee is
    # swallowed, matching cast's fire-and-forget contract.
    check_serialize(msg)
    try:
        call(conf, context, topic, msg)
    except Exception:
        pass


def notify(conf, context, topic, msg, envelope):
    # Notifications are only checked for serializability, not delivered.
    check_serialize(msg)


def cleanup():
    pass


def fanout_cast(conf, context, topic, msg):
    """Cast to all consumers of a topic"""
    check_serialize(msg)
    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    # Unlike cast(), deliver to every consumer of the topic, best-effort.
    for consumer in CONSUMERS.get(topic, []):
        try:
            consumer.call(context, version, method, namespace, args, None)
        except Exception:
            pass
diff --git a/cinder/openstack/common/rpc/impl_kombu.py b/cinder/openstack/common/rpc/impl_kombu.py
new file mode 100644
index 0000000000..681f531843
--- /dev/null
+++ b/cinder/openstack/common/rpc/impl_kombu.py
@@ -0,0 +1,838 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+ +import functools +import itertools +import socket +import ssl +import sys +import time +import uuid + +import eventlet +import greenlet +import kombu +import kombu.connection +import kombu.entity +import kombu.messaging +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import network_utils +from cinder.openstack.common.rpc import amqp as rpc_amqp +from cinder.openstack.common.rpc import common as rpc_common + +kombu_opts = [ + cfg.StrOpt('kombu_ssl_version', + default='', + help='SSL version to use (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_keyfile', + default='', + help='SSL key file (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_certfile', + default='', + help='SSL cert file (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_ca_certs', + default='', + help=('SSL certification authority file ' + '(valid only if SSL enabled)')), + cfg.StrOpt('rabbit_host', + default='localhost', + help='The RabbitMQ broker address where a single node is used'), + cfg.IntOpt('rabbit_port', + default=5672, + help='The RabbitMQ broker port where a single node is used'), + cfg.ListOpt('rabbit_hosts', + default=['$rabbit_host:$rabbit_port'], + help='RabbitMQ HA cluster host:port pairs'), + cfg.BoolOpt('rabbit_use_ssl', + default=False, + help='connect over SSL for RabbitMQ'), + cfg.StrOpt('rabbit_userid', + default='guest', + help='the RabbitMQ userid'), + cfg.StrOpt('rabbit_password', + default='guest', + help='the RabbitMQ password', + secret=True), + cfg.StrOpt('rabbit_virtual_host', + default='/', + help='the RabbitMQ virtual host'), + cfg.IntOpt('rabbit_retry_interval', + default=1, + help='how frequently to retry connecting with RabbitMQ'), + cfg.IntOpt('rabbit_retry_backoff', + default=2, + help='how long to backoff for between retries when connecting ' + 'to RabbitMQ'), + cfg.IntOpt('rabbit_max_retries', + default=0, + help='maximum retries with trying to connect to RabbitMQ ' + '(the 
default of 0 implies an infinite retry count)'), + cfg.BoolOpt('rabbit_durable_queues', + default=False, + help='use durable queues in RabbitMQ'), + cfg.BoolOpt('rabbit_ha_queues', + default=False, + help='use H/A queues in RabbitMQ (x-ha-policy: all).' + 'You need to wipe RabbitMQ database when ' + 'changing this option.'), + +] + +cfg.CONF.register_opts(kombu_opts) + +LOG = rpc_common.LOG + + +def _get_queue_arguments(conf): + """Construct the arguments for declaring a queue. + + If the rabbit_ha_queues option is set, we declare a mirrored queue + as described here: + + http://www.rabbitmq.com/ha.html + + Setting x-ha-policy to all means that the queue will be mirrored + to all nodes in the cluster. + """ + return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {} + + +class ConsumerBase(object): + """Consumer base class.""" + + def __init__(self, channel, callback, tag, **kwargs): + """Declare a queue on an amqp channel. + + 'channel' is the amqp channel to use + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + queue name, exchange name, and other kombu options are + passed in here as a dictionary. + """ + self.callback = callback + self.tag = str(tag) + self.kwargs = kwargs + self.queue = None + self.reconnect(channel) + + def reconnect(self, channel): + """Re-declare the queue after a rabbit reconnect""" + self.channel = channel + self.kwargs['channel'] = channel + self.queue = kombu.entity.Queue(**self.kwargs) + self.queue.declare() + + def consume(self, *args, **kwargs): + """Actually declare the consumer on the amqp channel. This will + start the flow of messages from the queue. Using the + Connection.iterconsume() iterator will process the messages, + calling the appropriate callback. + + If a callback is specified in kwargs, use that. Otherwise, + use the callback passed during __init__() + + If kwargs['nowait'] is True, then this call will block until + a message is read. 
+ + Messages will automatically be acked if the callback doesn't + raise an exception + """ + + options = {'consumer_tag': self.tag} + options['nowait'] = kwargs.get('nowait', False) + callback = kwargs.get('callback', self.callback) + if not callback: + raise ValueError("No callback defined") + + def _callback(raw_message): + message = self.channel.message_to_python(raw_message) + try: + msg = rpc_common.deserialize_msg(message.payload) + callback(msg) + except Exception: + LOG.exception(_("Failed to process message... skipping it.")) + finally: + message.ack() + + self.queue.consume(*args, callback=_callback, **options) + + def cancel(self): + """Cancel the consuming from the queue, if it has started""" + try: + self.queue.cancel(self.tag) + except KeyError, e: + # NOTE(comstud): Kludge to get around a amqplib bug + if str(e) != "u'%s'" % self.tag: + raise + self.queue = None + + +class DirectConsumer(ConsumerBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): + """Init a 'direct' queue. + + 'channel' is the amqp channel to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange(name=msg_id, + type='direct', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(DirectConsumer, self).__init__(channel, + callback, + tag, + name=msg_id, + exchange=exchange, + routing_key=msg_id, + **options) + + +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" + + def __init__(self, conf, channel, topic, callback, tag, name=None, + exchange_name=None, **kwargs): + """Init a 'topic' queue. 
+ + :param channel: the amqp channel to use + :param topic: the topic to listen on + :paramtype topic: str + :param callback: the callback to call when messages are received + :param tag: a unique ID for the consumer on the channel + :param name: optional queue name, defaults to topic + :paramtype name: str + + Other kombu options may be passed as keyword arguments + """ + # Default options + options = {'durable': conf.rabbit_durable_queues, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) + exchange = kombu.entity.Exchange(name=exchange_name, + type='topic', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(TopicConsumer, self).__init__(channel, + callback, + tag, + name=name or topic, + exchange=exchange, + routing_key=topic, + **options) + + +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" + + def __init__(self, conf, channel, topic, callback, tag, **kwargs): + """Init a 'fanout' queue. 
+ + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange(name=exchange_name, type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutConsumer, self).__init__(channel, callback, tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, + routing_key=self.routing_key) + + def send(self, msg, timeout=None): + """Send a message""" + if timeout: + # + # AMQP TTL is in milliseconds when set in the header. + # + self.producer.publish(msg, headers={'ttl': (timeout * 1000)}) + else: + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, conf, channel, msg_id, **kwargs): + """init a 'direct' publisher. 
+ + Kombu options may be passed as keyword args to override defaults + """ + + options = {'durable': False, + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + super(DirectPublisher, self).__init__(channel, msg_id, msg_id, + type='direct', **options) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'topic' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': conf.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange_name = rpc_amqp.get_control_exchange(conf) + super(TopicPublisher, self).__init__(channel, + exchange_name, + topic, + type='topic', + **options) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'fanout' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': False, + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic, + None, type='fanout', **options) + + +class NotifyPublisher(TopicPublisher): + """Publisher class for 'notify'""" + + def __init__(self, conf, channel, topic, **kwargs): + self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + self.queue_arguments = _get_queue_arguments(conf) + super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) + + def reconnect(self, channel): + super(NotifyPublisher, self).reconnect(channel) + + # NOTE(jerdfelt): Normally the consumer would create the queue, but + # we do this to ensure that messages don't get dropped if the + # consumer is started after we do + queue = kombu.entity.Queue(channel=channel, + exchange=self.exchange, + durable=self.durable, + name=self.routing_key, + routing_key=self.routing_key, + 
queue_arguments=self.queue_arguments) + queue.declare() + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + self.consumers = [] + self.consumer_thread = None + self.proxy_callbacks = [] + self.conf = conf + self.max_retries = self.conf.rabbit_max_retries + # Try forever? + if self.max_retries <= 0: + self.max_retries = None + self.interval_start = self.conf.rabbit_retry_interval + self.interval_stepping = self.conf.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 + self.memory_transport = False + + if server_params is None: + server_params = {} + # Keys to translate from server_params to kombu params + server_params_to_kombu_params = {'username': 'userid'} + + ssl_params = self._fetch_ssl_params() + params_list = [] + for adr in self.conf.rabbit_hosts: + hostname, port = network_utils.parse_host_port( + adr, default_port=self.conf.rabbit_port) + + params = { + 'hostname': hostname, + 'port': port, + 'userid': self.conf.rabbit_userid, + 'password': self.conf.rabbit_password, + 'virtual_host': self.conf.rabbit_virtual_host, + } + + for sp_key, value in server_params.iteritems(): + p_key = server_params_to_kombu_params.get(sp_key, sp_key) + params[p_key] = value + + if self.conf.fake_rabbit: + params['transport'] = 'memory' + if self.conf.rabbit_use_ssl: + params['ssl'] = ssl_params + + params_list.append(params) + + self.params_list = params_list + + self.memory_transport = self.conf.fake_rabbit + + self.connection = None + self.reconnect() + + def _fetch_ssl_params(self): + """Handles fetching what ssl params + should be used for the connection (if any)""" + ssl_params = dict() + + # http://docs.python.org/library/ssl.html - ssl.wrap_socket + if self.conf.kombu_ssl_version: + ssl_params['ssl_version'] = self.conf.kombu_ssl_version + if self.conf.kombu_ssl_keyfile: + ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile + if self.conf.kombu_ssl_certfile: + 
ssl_params['certfile'] = self.conf.kombu_ssl_certfile + if self.conf.kombu_ssl_ca_certs: + ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs + # We might want to allow variations in the + # future with this? + ssl_params['cert_reqs'] = ssl.CERT_REQUIRED + + if not ssl_params: + # Just have the default behavior + return True + else: + # Return the extended behavior + return ssl_params + + def _connect(self, params): + """Connect to rabbit. Re-establish any queues that may have + been declared before if we are reconnecting. Exceptions should + be handled by the caller. + """ + if self.connection: + LOG.info(_("Reconnecting to AMQP server on " + "%(hostname)s:%(port)d") % params) + try: + self.connection.release() + except self.connection_errors: + pass + # Setting this in case the next statement fails, though + # it shouldn't be doing any network operations, yet. + self.connection = None + self.connection = kombu.connection.BrokerConnection(**params) + self.connection_errors = self.connection.connection_errors + if self.memory_transport: + # Kludge to speed up tests. + self.connection.transport.polling_interval = 0.0 + self.consumer_num = itertools.count(1) + self.connection.connect() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + for consumer in self.consumers: + consumer.reconnect(self.channel) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % + params) + + def reconnect(self): + """Handles reconnecting and re-establishing queues. + Will retry up to self.max_retries number of times. + self.max_retries = 0 means to retry forever. + Sleep between tries, starting at self.interval_start + seconds, backing off self.interval_stepping number of seconds + each attempt. 
+ """ + + attempt = 0 + while True: + params = self.params_list[attempt % len(self.params_list)] + attempt += 1 + try: + self._connect(params) + return + except (IOError, self.connection_errors) as e: + pass + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. (See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. + if 'timeout' not in str(e): + raise + + log_info = {} + log_info['err_str'] = str(e) + log_info['max_retries'] = self.max_retries + log_info.update(params) + + if self.max_retries and attempt == self.max_retries: + LOG.error(_('Unable to connect to AMQP server on ' + '%(hostname)s:%(port)d after %(max_retries)d ' + 'tries: %(err_str)s') % log_info) + # NOTE(comstud): Copied from original code. There's + # really no better recourse because if this was a queue we + # need to consume on, we have no way to consume anymore. + sys.exit(1) + + if attempt == 1: + sleep_time = self.interval_start or 1 + elif attempt > 1: + sleep_time += self.interval_stepping + if self.interval_max: + sleep_time = min(sleep_time, self.interval_max) + + log_info['sleep_time'] = sleep_time + LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' + 'unreachable: %(err_str)s. Trying again in ' + '%(sleep_time)d seconds.') % log_info) + time.sleep(sleep_time) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (self.connection_errors, socket.timeout, IOError), e: + if error_callback: + error_callback(e) + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. 
(See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. + if 'timeout' not in str(e): + raise + if error_callback: + error_callback(e) + self.reconnect() + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.channel.close() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + self.consumers = [] + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + info = {'do_consume': True} + + def _error_callback(exc): + if isinstance(exc, socket.timeout): + LOG.debug(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to consume message from queue: %s') % + str(exc)) + info['do_consume'] = True + + def _consume(): + if info['do_consume']: + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] + for queue in queues_head: + 
queue.consume(nowait=True) + queues_tail.consume(nowait=False) + info['do_consume'] = False + return self.connection.drain_events(timeout=timeout) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def wait_on_proxy_callbacks(self): + """Wait for all proxy callback threads to exit.""" + for proxy_cb in self.proxy_callbacks: + proxy_cb.wait() + + def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): + """Send to a publisher based on the publisher class""" + + def _error_callback(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publish(): + publisher = cls(self.conf, self.channel, topic, **kwargs) + publisher.send(msg, timeout) + + self.ensure(_error_callback, _publish) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+ In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.declare_consumer(DirectConsumer, topic, callback) + + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): + """Create a 'topic' consumer.""" + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + exchange_name=exchange_name, + ), + topic, callback) + + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg, timeout=None): + """Send a 'topic' message""" + self.publisher_send(TopicPublisher, topic, msg, timeout) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) + + def notify_send(self, topic, msg, **kwargs): + """Send a notify message on a topic""" + self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) + + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + + if fanout: + self.declare_fanout_consumer(topic, proxy_cb) + else: + self.declare_topic_consumer(topic, proxy_cb) + + def 
create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + self.declare_topic_consumer(topic, proxy_cb, pool_name) + + def join_consumer_pool(self, callback, pool_name, topic, + exchange_name=None): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. + """ + callback_wrapper = rpc_amqp.CallbackWrapper( + conf=self.conf, + callback=callback, + connection_pool=rpc_amqp.get_connection_pool(self.conf, + Connection), + ) + self.proxy_callbacks.append(callback_wrapper) + self.declare_topic_consumer( + queue_name=pool_name, + topic=topic, + exchange_name=exchange_name, + callback=callback_wrapper, + ) + + +def create_connection(conf, new=True): + """Create a connection""" + return rpc_amqp.create_connection( + conf, new, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + return rpc_amqp.multicall( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + return rpc_amqp.call( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast(conf, context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + return rpc_amqp.cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast(conf, context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + return rpc_amqp.fanout_cast( + 
conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a topic to a specific server.""" + return rpc_amqp.cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a fanout exchange to a specific server.""" + return rpc_amqp.fanout_cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def notify(conf, context, topic, msg, envelope): + """Sends a notification event on a topic.""" + return rpc_amqp.notify( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection), + envelope) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) diff --git a/cinder/openstack/common/rpc/impl_qpid.py b/cinder/openstack/common/rpc/impl_qpid.py new file mode 100644 index 0000000000..24235b1f11 --- /dev/null +++ b/cinder/openstack/common/rpc/impl_qpid.py @@ -0,0 +1,649 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# Copyright 2011 - 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools +import itertools +import time +import uuid + +import eventlet +import greenlet +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import amqp as rpc_amqp +from cinder.openstack.common.rpc import common as rpc_common + +qpid_messaging = importutils.try_import("qpid.messaging") +qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") + +LOG = logging.getLogger(__name__) + +qpid_opts = [ + cfg.StrOpt('qpid_hostname', + default='localhost', + help='Qpid broker hostname'), + cfg.IntOpt('qpid_port', + default=5672, + help='Qpid broker port'), + cfg.ListOpt('qpid_hosts', + default=['$qpid_hostname:$qpid_port'], + help='Qpid HA cluster host:port pairs'), + cfg.StrOpt('qpid_username', + default='', + help='Username for qpid connection'), + cfg.StrOpt('qpid_password', + default='', + help='Password for qpid connection', + secret=True), + cfg.StrOpt('qpid_sasl_mechanisms', + default='', + help='Space separated list of SASL mechanisms to use for auth'), + cfg.IntOpt('qpid_heartbeat', + default=60, + help='Seconds between connection keepalive heartbeats'), + cfg.StrOpt('qpid_protocol', + default='tcp', + help="Transport to use, either 'tcp' or 'ssl'"), + cfg.BoolOpt('qpid_tcp_nodelay', + default=True, + help='Disable Nagle algorithm'), +] + +cfg.CONF.register_opts(qpid_opts) + + +class ConsumerBase(object): + """Consumer base class.""" + + def __init__(self, session, callback, node_name, node_opts, + link_name, link_opts): + """Declare a queue on an amqp session. + + 'session' is the amqp session to use + 'callback' is the callback to call when messages are received + 'node_name' is the first part of the Qpid address string, before ';' + 'node_opts' will be applied to the "x-declare" section of "node" + in the address string. 
+ 'link_name' goes into the "name" field of the "link" in the address + string + 'link_opts' will be applied to the "x-declare" section of "link" + in the address string. + """ + self.callback = callback + self.receiver = None + self.session = None + + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True, + }, + }, + "link": { + "name": link_name, + "durable": True, + "x-declare": { + "durable": False, + "auto-delete": True, + "exclusive": False, + }, + }, + } + addr_opts["node"]["x-declare"].update(node_opts) + addr_opts["link"]["x-declare"].update(link_opts) + + self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + + self.reconnect(session) + + def reconnect(self, session): + """Re-declare the receiver after a qpid reconnect""" + self.session = session + self.receiver = session.receiver(self.address) + self.receiver.capacity = 1 + + def consume(self): + """Fetch the message and pass it to the callback object""" + message = self.receiver.fetch() + try: + msg = rpc_common.deserialize_msg(message.content) + self.callback(msg) + except Exception: + LOG.exception(_("Failed to process message... skipping it.")) + finally: + self.session.acknowledge(message) + + def get_receiver(self): + return self.receiver + + +class DirectConsumer(ConsumerBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, conf, session, msg_id, callback): + """Init a 'direct' queue. + + 'session' is the amqp session to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + """ + + super(DirectConsumer, self).__init__(session, callback, + "%s/%s" % (msg_id, msg_id), + {"type": "direct"}, + msg_id, + {"exclusive": True}) + + +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" + + def __init__(self, conf, session, topic, callback, name=None, + exchange_name=None): + """Init a 'topic' queue. 
+ + :param session: the amqp session to use + :param topic: is the topic to listen on + :paramtype topic: str + :param callback: the callback to call when messages are received + :param name: optional queue name, defaults to topic + """ + + exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) + super(TopicConsumer, self).__init__(session, callback, + "%s/%s" % (exchange_name, topic), + {}, name or topic, {}) + + +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" + + def __init__(self, conf, session, topic, callback): + """Init a 'fanout' queue. + + 'session' is the amqp session to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + """ + + super(FanoutConsumer, self).__init__( + session, callback, + "%s_fanout" % topic, + {"durable": False, "type": "fanout"}, + "%s_fanout_%s" % (topic, uuid.uuid4().hex), + {"exclusive": True}) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, session, node_name, node_opts=None): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.sender = None + self.session = session + + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": False, + # auto-delete isn't implemented for exchanges in qpid, + # but put in here anyway + "auto-delete": True, + }, + }, + } + if node_opts: + addr_opts["node"]["x-declare"].update(node_opts) + + self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + + self.reconnect(session) + + def reconnect(self, session): + """Re-establish the Sender after a reconnection""" + self.sender = session.sender(self.address) + + def send(self, msg): + """Send a message""" + self.sender.send(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, conf, session, msg_id): + """Init a 'direct' publisher.""" + super(DirectPublisher, self).__init__(session, 
msg_id, + {"type": "Direct"}) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, conf, session, topic): + """init a 'topic' publisher. + """ + exchange_name = rpc_amqp.get_control_exchange(conf) + super(TopicPublisher, self).__init__(session, + "%s/%s" % (exchange_name, topic)) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, conf, session, topic): + """init a 'fanout' publisher. + """ + super(FanoutPublisher, self).__init__( + session, + "%s_fanout" % topic, {"type": "fanout"}) + + +class NotifyPublisher(Publisher): + """Publisher class for notifications""" + def __init__(self, conf, session, topic): + """init a 'topic' publisher. + """ + exchange_name = rpc_amqp.get_control_exchange(conf) + super(NotifyPublisher, self).__init__(session, + "%s/%s" % (exchange_name, topic), + {"durable": True}) + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + if not qpid_messaging: + raise ImportError("Failed to import qpid.messaging") + + self.session = None + self.consumers = {} + self.consumer_thread = None + self.proxy_callbacks = [] + self.conf = conf + + if server_params and 'hostname' in server_params: + # NOTE(russellb) This enables support for cast_to_server. 
+ server_params['qpid_hosts'] = [ + '%s:%d' % (server_params['hostname'], + server_params.get('port', 5672)) + ] + + params = { + 'qpid_hosts': self.conf.qpid_hosts, + 'username': self.conf.qpid_username, + 'password': self.conf.qpid_password, + } + params.update(server_params or {}) + + self.brokers = params['qpid_hosts'] + self.username = params['username'] + self.password = params['password'] + self.connection_create(self.brokers[0]) + self.reconnect() + + def connection_create(self, broker): + # Create the connection - this does not open the connection + self.connection = qpid_messaging.Connection(broker) + + # Check if flags are set and if so set them for the connection + # before we call open + self.connection.username = self.username + self.connection.password = self.password + + self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms + # Reconnection is done by self.reconnect() + self.connection.reconnect = False + self.connection.heartbeat = self.conf.qpid_heartbeat + self.connection.transport = self.conf.qpid_protocol + self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay + + def _register_consumer(self, consumer): + self.consumers[str(consumer.get_receiver())] = consumer + + def _lookup_consumer(self, receiver): + return self.consumers[str(receiver)] + + def reconnect(self): + """Handles reconnecting and re-establishing sessions and queues""" + if self.connection.opened(): + try: + self.connection.close() + except qpid_exceptions.ConnectionError: + pass + + attempt = 0 + delay = 1 + while True: + broker = self.brokers[attempt % len(self.brokers)] + attempt += 1 + + try: + self.connection_create(broker) + self.connection.open() + except qpid_exceptions.ConnectionError, e: + msg_dict = dict(e=e, delay=delay) + msg = _("Unable to connect to AMQP server: %(e)s. 
" + "Sleeping %(delay)s seconds") % msg_dict + LOG.error(msg) + time.sleep(delay) + delay = min(2 * delay, 60) + else: + LOG.info(_('Connected to AMQP server on %s'), broker) + break + + self.session = self.connection.session() + + if self.consumers: + consumers = self.consumers + self.consumers = {} + + for consumer in consumers.itervalues(): + consumer.reconnect(self.session) + self._register_consumer(consumer) + + LOG.debug(_("Re-established AMQP queues")) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (qpid_exceptions.Empty, + qpid_exceptions.ConnectionError), e: + if error_callback: + error_callback(e) + self.reconnect() + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.connection.close() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.session.close() + self.session = self.connection.session() + self.consumers = {} + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.session, topic, callback) + self._register_consumer(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + def _error_callback(exc): + if isinstance(exc, qpid_exceptions.Empty): + LOG.debug(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to 
consume message from queue: %s') % + str(exc)) + + def _consume(): + nxt_receiver = self.session.next_receiver(timeout=timeout) + try: + self._lookup_consumer(nxt_receiver).consume() + except Exception: + LOG.exception(_("Error processing message. Skipping it.")) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def wait_on_proxy_callbacks(self): + """Wait for all proxy callback threads to exit.""" + for proxy_cb in self.proxy_callbacks: + proxy_cb.wait() + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publisher_send(): + publisher = cls(self.conf, self.session, topic) + publisher.send(msg) + + return self.ensure(_connect_error, _publisher_send) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+ In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.declare_consumer(DirectConsumer, topic, callback) + + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): + """Create a 'topic' consumer.""" + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + exchange_name=exchange_name, + ), + topic, callback) + + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg, timeout=None): + """Send a 'topic' message""" + # + # We want to create a message with attributes, e.g. a TTL. We + # don't really need to keep 'msg' in its JSON format any longer + # so let's create an actual qpid message here and get some + # value-add on the go. + # + # WARNING: Request timeout happens to be in the same units as + # qpid's TTL (seconds). If this changes in the future, then this + # will need to be altered accordingly. 
+ # + qpid_message = qpid_messaging.Message(content=msg, ttl=timeout) + self.publisher_send(TopicPublisher, topic, qpid_message) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) + + def notify_send(self, topic, msg, **kwargs): + """Send a notify message on a topic""" + self.publisher_send(NotifyPublisher, topic, msg) + + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + + if fanout: + consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) + else: + consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb) + + self._register_consumer(consumer) + + return consumer + + def create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + + consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, + name=pool_name) + + self._register_consumer(consumer) + + return consumer + + def join_consumer_pool(self, callback, pool_name, topic, + exchange_name=None): + """Register as a member of a group of consumers for a given topic from + the specified exchange. 
+ + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. + """ + callback_wrapper = rpc_amqp.CallbackWrapper( + conf=self.conf, + callback=callback, + connection_pool=rpc_amqp.get_connection_pool(self.conf, + Connection), + ) + self.proxy_callbacks.append(callback_wrapper) + + consumer = TopicConsumer(conf=self.conf, + session=self.session, + topic=topic, + callback=callback_wrapper, + name=pool_name, + exchange_name=exchange_name) + + self._register_consumer(consumer) + return consumer + + +def create_connection(conf, new=True): + """Create a connection""" + return rpc_amqp.create_connection( + conf, new, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + return rpc_amqp.multicall( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + return rpc_amqp.call( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast(conf, context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + return rpc_amqp.cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast(conf, context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + return rpc_amqp.fanout_cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a topic to a specific server.""" + return rpc_amqp.cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg): + """Sends a 
message on a fanout exchange to a specific server.""" + return rpc_amqp.fanout_cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def notify(conf, context, topic, msg, envelope): + """Sends a notification event on a topic.""" + return rpc_amqp.notify(conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection), + envelope) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) diff --git a/cinder/openstack/common/rpc/impl_zmq.py b/cinder/openstack/common/rpc/impl_zmq.py new file mode 100644 index 0000000000..d3d3599e8f --- /dev/null +++ b/cinder/openstack/common/rpc/impl_zmq.py @@ -0,0 +1,851 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import pprint +import re +import socket +import sys +import types +import uuid + +import eventlet +import greenlet +from oslo.config import cfg + +from cinder.openstack.common import excutils +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import processutils as utils +from cinder.openstack.common.rpc import common as rpc_common + +zmq = importutils.try_import('eventlet.green.zmq') + +# for convenience, are not modified. 
+pformat = pprint.pformat +Timeout = eventlet.timeout.Timeout +LOG = rpc_common.LOG +RemoteError = rpc_common.RemoteError +RPCException = rpc_common.RPCException + +zmq_opts = [ + cfg.StrOpt('rpc_zmq_bind_address', default='*', + help='ZeroMQ bind address. Should be a wildcard (*), ' + 'an ethernet interface, or IP. ' + 'The "host" option should point or resolve to this ' + 'address.'), + + # The module.Class to use for matchmaking. + cfg.StrOpt( + 'rpc_zmq_matchmaker', + default=('cinder.openstack.common.rpc.' + 'matchmaker.MatchMakerLocalhost'), + help='MatchMaker driver', + ), + + # The following port is unassigned by IANA as of 2012-05-21 + cfg.IntOpt('rpc_zmq_port', default=9501, + help='ZeroMQ receiver listening port'), + + cfg.IntOpt('rpc_zmq_contexts', default=1, + help='Number of ZeroMQ contexts, defaults to 1'), + + cfg.IntOpt('rpc_zmq_topic_backlog', default=None, + help='Maximum number of ingress messages to locally buffer ' + 'per topic. Default is unlimited.'), + + cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', + help='Directory for holding IPC sockets'), + + cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), + help='Name of this node. Must be a valid hostname, FQDN, or ' + 'IP address. Must match "host" option, if running Nova.') +] + + +CONF = cfg.CONF +CONF.register_opts(zmq_opts) + +ZMQ_CTX = None # ZeroMQ Context, must be global. +matchmaker = None # memoized matchmaker object + + +def _serialize(data): + """ + Serialization wrapper + We prefer using JSON, but it cannot encode all types. + Error if a developer passes us bad data. 
+ """ + try: + return jsonutils.dumps(data, ensure_ascii=True) + except TypeError: + with excutils.save_and_reraise_exception(): + LOG.error(_("JSON serialization failed.")) + + +def _deserialize(data): + """ + Deserialization wrapper + """ + LOG.debug(_("Deserializing: %s"), data) + return jsonutils.loads(data) + + +class ZmqSocket(object): + """ + A tiny wrapper around ZeroMQ to simplify the send/recv protocol + and connection management. + + Can be used as a Context (supports the 'with' statement). + """ + + def __init__(self, addr, zmq_type, bind=True, subscribe=None): + self.sock = _get_ctxt().socket(zmq_type) + self.addr = addr + self.type = zmq_type + self.subscriptions = [] + + # Support failures on sending/receiving on wrong socket type. + self.can_recv = zmq_type in (zmq.PULL, zmq.SUB) + self.can_send = zmq_type in (zmq.PUSH, zmq.PUB) + self.can_sub = zmq_type in (zmq.SUB, ) + + # Support list, str, & None for subscribe arg (cast to list) + do_sub = { + list: subscribe, + str: [subscribe], + type(None): [] + }[type(subscribe)] + + for f in do_sub: + self.subscribe(f) + + str_data = {'addr': addr, 'type': self.socket_s(), + 'subscribe': subscribe, 'bind': bind} + + LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) + LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) + LOG.debug(_("-> bind: %(bind)s"), str_data) + + try: + if bind: + self.sock.bind(addr) + else: + self.sock.connect(addr) + except Exception: + raise RPCException(_("Could not open socket.")) + + def socket_s(self): + """Get socket type as string.""" + t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER', + 'DEALER') + return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type] + + def subscribe(self, msg_filter): + """Subscribe.""" + if not self.can_sub: + raise RPCException("Cannot subscribe on this socket.") + LOG.debug(_("Subscribing to %s"), msg_filter) + + try: + self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) + except Exception: + return + + 
self.subscriptions.append(msg_filter) + + def unsubscribe(self, msg_filter): + """Unsubscribe.""" + if msg_filter not in self.subscriptions: + return + self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter) + self.subscriptions.remove(msg_filter) + + def close(self): + if self.sock is None or self.sock.closed: + return + + # We must unsubscribe, or we'll leak descriptors. + if len(self.subscriptions) > 0: + for f in self.subscriptions: + try: + self.sock.setsockopt(zmq.UNSUBSCRIBE, f) + except Exception: + pass + self.subscriptions = [] + + try: + # Default is to linger + self.sock.close() + except Exception: + # While this is a bad thing to happen, + # it would be much worse if some of the code calling this + # were to fail. For now, lets log, and later evaluate + # if we can safely raise here. + LOG.error("ZeroMQ socket could not be closed.") + self.sock = None + + def recv(self): + if not self.can_recv: + raise RPCException(_("You cannot recv on this socket.")) + return self.sock.recv_multipart() + + def send(self, data): + if not self.can_send: + raise RPCException(_("You cannot send on this socket.")) + self.sock.send_multipart(data) + + +class ZmqClient(object): + """Client for ZMQ sockets.""" + + def __init__(self, addr, socket_type=None, bind=False): + if socket_type is None: + socket_type = zmq.PUSH + self.outq = ZmqSocket(addr, socket_type, bind=bind) + + def cast(self, msg_id, topic, data, envelope=False): + msg_id = msg_id or 0 + + if not envelope: + self.outq.send(map(bytes, + (msg_id, topic, 'cast', _serialize(data)))) + return + + rpc_envelope = rpc_common.serialize_msg(data[1], envelope) + zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) + self.outq.send(map(bytes, + (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) + + def close(self): + self.outq.close() + + +class RpcContext(rpc_common.CommonRpcContext): + """Context that supports replying to a rpc.call.""" + def __init__(self, **kwargs): + self.replies = [] + super(RpcContext, 
self).__init__(**kwargs) + + def deepcopy(self): + values = self.to_dict() + values['replies'] = self.replies + return self.__class__(**values) + + def reply(self, reply=None, failure=None, ending=False): + if ending: + return + self.replies.append(reply) + + @classmethod + def marshal(self, ctx): + ctx_data = ctx.to_dict() + return _serialize(ctx_data) + + @classmethod + def unmarshal(self, data): + return RpcContext.from_dict(_deserialize(data)) + + +class InternalContext(object): + """Used by ConsumerBase as a private context for - methods.""" + + def __init__(self, proxy): + self.proxy = proxy + self.msg_waiter = None + + def _get_response(self, ctx, proxy, topic, data): + """Process a curried message and cast the result to topic.""" + LOG.debug(_("Running func with context: %s"), ctx.to_dict()) + data.setdefault('version', None) + data.setdefault('args', {}) + + try: + result = proxy.dispatch( + ctx, data['version'], data['method'], + data.get('namespace'), **data['args']) + return ConsumerBase.normalize_reply(result, ctx.replies) + except greenlet.GreenletExit: + # ignore these since they are just from shutdowns + pass + except rpc_common.ClientException, e: + LOG.debug(_("Expected exception during message handling (%s)") % + e._exc_info[1]) + return {'exc': + rpc_common.serialize_remote_exception(e._exc_info, + log_failure=False)} + except Exception: + LOG.error(_("Exception during message handling")) + return {'exc': + rpc_common.serialize_remote_exception(sys.exc_info())} + + def reply(self, ctx, proxy, + msg_id=None, context=None, topic=None, msg=None): + """Reply to a casted call.""" + # NOTE(ewindisch): context kwarg exists for Grizzly compat. + # this may be able to be removed earlier than + # 'I' if ConsumerBase.process were refactored. 
+ if type(msg) is list: + payload = msg[-1] + else: + payload = msg + + response = ConsumerBase.normalize_reply( + self._get_response(ctx, proxy, topic, payload), + ctx.replies) + + LOG.debug(_("Sending reply")) + _multi_send(_cast, ctx, topic, { + 'method': '-process_reply', + 'args': { + 'msg_id': msg_id, # Include for Folsom compat. + 'response': response + } + }, _msg_id=msg_id) + + +class ConsumerBase(object): + """Base Consumer.""" + + def __init__(self): + self.private_ctx = InternalContext(None) + + @classmethod + def normalize_reply(self, result, replies): + #TODO(ewindisch): re-evaluate and document this method. + if isinstance(result, types.GeneratorType): + return list(result) + elif replies: + return replies + else: + return [result] + + def process(self, proxy, ctx, data): + data.setdefault('version', None) + data.setdefault('args', {}) + + # Method starting with - are + # processed internally. (non-valid method name) + method = data.get('method') + if not method: + LOG.error(_("RPC message did not include method.")) + return + + # Internal method + # uses internal context for safety. + if method == '-reply': + self.private_ctx.reply(ctx, proxy, **data['args']) + return + + proxy.dispatch(ctx, data['version'], + data['method'], data.get('namespace'), **data['args']) + + +class ZmqBaseReactor(ConsumerBase): + """ + A consumer class implementing a + centralized casting broker (PULL-PUSH) + for RoundRobin requests. + """ + + def __init__(self, conf): + super(ZmqBaseReactor, self).__init__() + + self.mapping = {} + self.proxies = {} + self.threads = [] + self.sockets = [] + self.subscribe = {} + + self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) + + def register(self, proxy, in_addr, zmq_type_in, out_addr=None, + zmq_type_out=None, in_bind=True, out_bind=True, + subscribe=None): + + LOG.info(_("Registering reactor")) + + if zmq_type_in not in (zmq.PULL, zmq.SUB): + raise RPCException("Bad input socktype") + + # Items push in. 
+ inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind, + subscribe=subscribe) + + self.proxies[inq] = proxy + self.sockets.append(inq) + + LOG.info(_("In reactor registered")) + + if not out_addr: + return + + if zmq_type_out not in (zmq.PUSH, zmq.PUB): + raise RPCException("Bad output socktype") + + # Items push out. + outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) + + self.mapping[inq] = outq + self.mapping[outq] = inq + self.sockets.append(outq) + + LOG.info(_("Out reactor registered")) + + def consume_in_thread(self): + def _consume(sock): + LOG.info(_("Consuming socket")) + while True: + self.consume(sock) + + for k in self.proxies.keys(): + self.threads.append( + self.pool.spawn(_consume, k) + ) + + def wait(self): + for t in self.threads: + t.wait() + + def close(self): + for s in self.sockets: + s.close() + + for t in self.threads: + t.kill() + + +class ZmqProxy(ZmqBaseReactor): + """ + A consumer class implementing a + topic-based proxy, forwarding to + IPC sockets. + """ + + def __init__(self, conf): + super(ZmqProxy, self).__init__(conf) + pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\')) + self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep))) + + self.topic_proxy = {} + + def consume(self, sock): + ipc_dir = CONF.rpc_zmq_ipc_dir + + #TODO(ewindisch): use zero-copy (i.e. references, not copying) + data = sock.recv() + topic = data[1] + + LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) + + if topic.startswith('fanout~'): + sock_type = zmq.PUB + topic = topic.split('.', 1)[0] + elif topic.startswith('zmq_replies'): + sock_type = zmq.PUB + else: + sock_type = zmq.PUSH + + if topic not in self.topic_proxy: + def publisher(waiter): + LOG.info(_("Creating proxy for topic: %s"), topic) + + try: + # The topic is received over the network, + # don't trust this input. 
+ if self.badchars.search(topic) is not None: + emsg = _("Topic contained dangerous characters.") + LOG.warn(emsg) + raise RPCException(emsg) + + out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % + (ipc_dir, topic), + sock_type, bind=True) + except RPCException: + waiter.send_exception(*sys.exc_info()) + return + + self.topic_proxy[topic] = eventlet.queue.LightQueue( + CONF.rpc_zmq_topic_backlog) + self.sockets.append(out_sock) + + # It takes some time for a pub socket to open, + # before we can have any faith in doing a send() to it. + if sock_type == zmq.PUB: + eventlet.sleep(.5) + + waiter.send(True) + + while(True): + data = self.topic_proxy[topic].get() + out_sock.send(data) + LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % + {'data': data}) + + wait_sock_creation = eventlet.event.Event() + eventlet.spawn(publisher, wait_sock_creation) + + try: + wait_sock_creation.wait() + except RPCException: + LOG.error(_("Topic socket file creation failed.")) + return + + try: + self.topic_proxy[topic].put_nowait(data) + LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") % + {'data': data}) + except eventlet.queue.Full: + LOG.error(_("Local per-topic backlog buffer full for topic " + "%(topic)s. 
Dropping message.") % {'topic': topic}) + + def consume_in_thread(self): + """Runs the ZmqProxy service""" + ipc_dir = CONF.rpc_zmq_ipc_dir + consume_in = "tcp://%s:%s" % \ + (CONF.rpc_zmq_bind_address, + CONF.rpc_zmq_port) + consumption_proxy = InternalContext(None) + + if not os.path.isdir(ipc_dir): + try: + utils.execute('mkdir', '-p', ipc_dir, run_as_root=True) + utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()), + ipc_dir, run_as_root=True) + utils.execute('chmod', '750', ipc_dir, run_as_root=True) + except utils.ProcessExecutionError: + with excutils.save_and_reraise_exception(): + LOG.error(_("Could not create IPC directory %s") % + (ipc_dir, )) + + try: + self.register(consumption_proxy, + consume_in, + zmq.PULL, + out_bind=True) + except zmq.ZMQError: + with excutils.save_and_reraise_exception(): + LOG.error(_("Could not create ZeroMQ receiver daemon. " + "Socket may already be in use.")) + + super(ZmqProxy, self).consume_in_thread() + + +def unflatten_envelope(packenv): + """Unflattens the RPC envelope. + Takes a list and returns a dictionary. + i.e. [1,2,3,4] => {1: 2, 3: 4} + """ + i = iter(packenv) + h = {} + try: + while True: + k = i.next() + h[k] = i.next() + except StopIteration: + return h + + +class ZmqReactor(ZmqBaseReactor): + """ + A consumer class implementing a + consumer for messages. Can also be + used as a 1:1 proxy + """ + + def __init__(self, conf): + super(ZmqReactor, self).__init__(conf) + + def consume(self, sock): + #TODO(ewindisch): use zero-copy (i.e. 
references, not copying) + data = sock.recv() + LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) + if sock in self.mapping: + LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { + 'data': data}) + self.mapping[sock].send(data) + return + + proxy = self.proxies[sock] + + if data[2] == 'cast': # Legacy protocol + packenv = data[3] + + ctx, msg = _deserialize(packenv) + request = rpc_common.deserialize_msg(msg) + ctx = RpcContext.unmarshal(ctx) + elif data[2] == 'impl_zmq_v2': + packenv = data[4:] + + msg = unflatten_envelope(packenv) + request = rpc_common.deserialize_msg(msg) + + # Unmarshal only after verifying the message. + ctx = RpcContext.unmarshal(data[3]) + else: + LOG.error(_("ZMQ Envelope version unsupported or unknown.")) + return + + self.pool.spawn_n(self.process, proxy, ctx, request) + + +class Connection(rpc_common.Connection): + """Manages connections and threads.""" + + def __init__(self, conf): + self.topics = [] + self.reactor = ZmqReactor(conf) + + def create_consumer(self, topic, proxy, fanout=False): + # Register with matchmaker. + _get_matchmaker().register(topic, CONF.rpc_zmq_host) + + # Subscription scenarios + if fanout: + sock_type = zmq.SUB + subscribe = ('', fanout)[type(fanout) == str] + topic = 'fanout~' + topic.split('.', 1)[0] + else: + sock_type = zmq.PULL + subscribe = None + topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host)) + + if topic in self.topics: + LOG.info(_("Skipping topic registration. 
Already registered.")) + return + + # Receive messages from (local) proxy + inaddr = "ipc://%s/zmq_topic_%s" % \ + (CONF.rpc_zmq_ipc_dir, topic) + + LOG.debug(_("Consumer is a zmq.%s"), + ['PULL', 'SUB'][sock_type == zmq.SUB]) + + self.reactor.register(proxy, inaddr, sock_type, + subscribe=subscribe, in_bind=False) + self.topics.append(topic) + + def close(self): + _get_matchmaker().stop_heartbeat() + for topic in self.topics: + _get_matchmaker().unregister(topic, CONF.rpc_zmq_host) + + self.reactor.close() + self.topics = [] + + def wait(self): + self.reactor.wait() + + def consume_in_thread(self): + _get_matchmaker().start_heartbeat() + self.reactor.consume_in_thread() + + +def _cast(addr, context, topic, msg, timeout=None, envelope=False, + _msg_id=None): + timeout_cast = timeout or CONF.rpc_cast_timeout + payload = [RpcContext.marshal(context), msg] + + with Timeout(timeout_cast, exception=rpc_common.Timeout): + try: + conn = ZmqClient(addr) + + # assumes cast can't return an exception + conn.cast(_msg_id, topic, payload, envelope) + except zmq.ZMQError: + raise RPCException("Cast failed. ZMQ Socket Exception") + finally: + if 'conn' in vars(): + conn.close() + + +def _call(addr, context, topic, msg, timeout=None, + envelope=False): + # timeout_response is how long we wait for a response + timeout = timeout or CONF.rpc_response_timeout + + # The msg_id is used to track replies. + msg_id = uuid.uuid4().hex + + # Replies always come into the reply service. + reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host + + LOG.debug(_("Creating payload")) + # Curry the original request into a reply method. + mcontext = RpcContext.marshal(context) + payload = { + 'method': '-reply', + 'args': { + 'msg_id': msg_id, + 'topic': reply_topic, + # TODO(ewindisch): safe to remove mcontext in I. + 'msg': [mcontext, msg] + } + } + + LOG.debug(_("Creating queue socket for reply waiter")) + + # Messages arriving async. 
+ # TODO(ewindisch): have reply consumer with dynamic subscription mgmt + with Timeout(timeout, exception=rpc_common.Timeout): + try: + msg_waiter = ZmqSocket( + "ipc://%s/zmq_topic_zmq_replies.%s" % + (CONF.rpc_zmq_ipc_dir, + CONF.rpc_zmq_host), + zmq.SUB, subscribe=msg_id, bind=False + ) + + LOG.debug(_("Sending cast")) + _cast(addr, context, topic, payload, envelope) + + LOG.debug(_("Cast sent; Waiting reply")) + # Blocks until receives reply + msg = msg_waiter.recv() + LOG.debug(_("Received message: %s"), msg) + LOG.debug(_("Unpacking response")) + + if msg[2] == 'cast': # Legacy version + raw_msg = _deserialize(msg[-1])[-1] + elif msg[2] == 'impl_zmq_v2': + rpc_envelope = unflatten_envelope(msg[4:]) + raw_msg = rpc_common.deserialize_msg(rpc_envelope) + else: + raise rpc_common.UnsupportedRpcEnvelopeVersion( + _("Unsupported or unknown ZMQ envelope returned.")) + + responses = raw_msg['args']['response'] + # ZMQError trumps the Timeout error. + except zmq.ZMQError: + raise RPCException("ZMQ Socket Error") + except (IndexError, KeyError): + raise RPCException(_("RPC Message Invalid.")) + finally: + if 'msg_waiter' in vars(): + msg_waiter.close() + + # It seems we don't need to do all of the following, + # but perhaps it would be useful for multicall? + # One effect of this is that we're checking all + # responses for Exceptions. + for resp in responses: + if isinstance(resp, types.DictType) and 'exc' in resp: + raise rpc_common.deserialize_remote_exception(CONF, resp['exc']) + + return responses[-1] + + +def _multi_send(method, context, topic, msg, timeout=None, + envelope=False, _msg_id=None): + """ + Wraps the sending of messages, + dispatches to the matchmaker and sends + message to all relevant hosts. 
+ """ + conf = CONF + LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) + + queues = _get_matchmaker().queues(topic) + LOG.debug(_("Sending message(s) to: %s"), queues) + + # Don't stack if we have no matchmaker results + if len(queues) == 0: + LOG.warn(_("No matchmaker results. Not casting.")) + # While not strictly a timeout, callers know how to handle + # this exception and a timeout isn't too big a lie. + raise rpc_common.Timeout(_("No match from matchmaker.")) + + # This supports brokerless fanout (addresses > 1) + for queue in queues: + (_topic, ip_addr) = queue + _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port) + + if method.__name__ == '_cast': + eventlet.spawn_n(method, _addr, context, + _topic, msg, timeout, envelope, + _msg_id) + return + return method(_addr, context, _topic, msg, timeout, + envelope) + + +def create_connection(conf, new=True): + return Connection(conf) + + +def multicall(conf, *args, **kwargs): + """Multiple calls.""" + return _multi_send(_call, *args, **kwargs) + + +def call(conf, *args, **kwargs): + """Send a message, expect a response.""" + data = _multi_send(_call, *args, **kwargs) + return data[-1] + + +def cast(conf, *args, **kwargs): + """Send a message expecting no reply.""" + _multi_send(_cast, *args, **kwargs) + + +def fanout_cast(conf, context, topic, msg, **kwargs): + """Send a message to all listening and expect no reply.""" + # NOTE(ewindisch): fanout~ is used because it avoid splitting on . + # and acts as a non-subtle hint to the matchmaker and ZmqProxy. + _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) + + +def notify(conf, context, topic, msg, envelope): + """ + Send notification event. + Notifications are sent to topic-priority. + This differs from the AMQP drivers which send to topic.priority. + """ + # NOTE(ewindisch): dot-priority in rpc notifier does not + # work with our assumptions. 
+ topic = topic.replace('.', '-') + cast(conf, context, topic, msg, envelope=envelope) + + +def cleanup(): + """Clean up resources in use by implementation.""" + global ZMQ_CTX + if ZMQ_CTX: + ZMQ_CTX.term() + ZMQ_CTX = None + + global matchmaker + matchmaker = None + + +def _get_ctxt(): + if not zmq: + raise ImportError("Failed to import eventlet.green.zmq") + + global ZMQ_CTX + if not ZMQ_CTX: + ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts) + return ZMQ_CTX + + +def _get_matchmaker(*args, **kwargs): + global matchmaker + if not matchmaker: + matchmaker = importutils.import_object( + CONF.rpc_zmq_matchmaker, *args, **kwargs) + return matchmaker diff --git a/cinder/openstack/common/rpc/matchmaker.py b/cinder/openstack/common/rpc/matchmaker.py new file mode 100644 index 0000000000..f12c14dfd1 --- /dev/null +++ b/cinder/openstack/common/rpc/matchmaker.py @@ -0,0 +1,425 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +The MatchMaker classes should except a Topic or Fanout exchange key and +return keys for direct exchanges, per (approximate) AMQP parlance. 
+""" + +import contextlib +import itertools +import json + +import eventlet +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging + + +matchmaker_opts = [ + # Matchmaker ring file + cfg.StrOpt('matchmaker_ringfile', + default='/etc/nova/matchmaker_ring.json', + help='Matchmaker ring file (JSON)'), + cfg.IntOpt('matchmaker_heartbeat_freq', + default=300, + help='Heartbeat frequency'), + cfg.IntOpt('matchmaker_heartbeat_ttl', + default=600, + help='Heartbeat time-to-live.'), +] + +CONF = cfg.CONF +CONF.register_opts(matchmaker_opts) +LOG = logging.getLogger(__name__) +contextmanager = contextlib.contextmanager + + +class MatchMakerException(Exception): + """Signified a match could not be found.""" + message = _("Match not found by MatchMaker.") + + +class Exchange(object): + """ + Implements lookups. + Subclass this to support hashtables, dns, etc. + """ + def __init__(self): + pass + + def run(self, key): + raise NotImplementedError() + + +class Binding(object): + """ + A binding on which to perform a lookup. + """ + def __init__(self): + pass + + def test(self, key): + raise NotImplementedError() + + +class MatchMakerBase(object): + """ + Match Maker Base Class. + Build off HeartbeatMatchMakerBase if building a + heartbeat-capable MatchMaker. + """ + def __init__(self): + # Array of tuples. Index [2] toggles negation, [3] is last-if-true + self.bindings = [] + + self.no_heartbeat_msg = _('Matchmaker does not implement ' + 'registration or heartbeat.') + + def register(self, key, host): + """ + Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. + """ + pass + + def ack_alive(self, key, host): + """ + Acknowledge that a key.host is alive. + Used internally for updating heartbeats, + but may also be used publically to acknowledge + a system is alive (i.e. 
rpc message successfully + sent to host) + """ + pass + + def is_alive(self, topic, host): + """ + Checks if a host is alive. + """ + pass + + def expire(self, topic, host): + """ + Explicitly expire a host's registration. + """ + pass + + def send_heartbeats(self): + """ + Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, + which loops this method. + """ + pass + + def unregister(self, key, host): + """ + Unregister a topic. + """ + pass + + def start_heartbeat(self): + """ + Spawn heartbeat greenthread. + """ + pass + + def stop_heartbeat(self): + """ + Destroys the heartbeat greenthread. + """ + pass + + def add_binding(self, binding, rule, last=True): + self.bindings.append((binding, rule, False, last)) + + #NOTE(ewindisch): kept the following method in case we implement the + # underlying support. + #def add_negate_binding(self, binding, rule, last=True): + # self.bindings.append((binding, rule, True, last)) + + def queues(self, key): + workers = [] + + # bit is for negate bindings - if we choose to implement it. + # last stops processing rules if this matches. + for (binding, exchange, bit, last) in self.bindings: + if binding.test(key): + workers.extend(exchange.run(key)) + + # Support last. + if last: + return workers + return workers + + +class HeartbeatMatchMakerBase(MatchMakerBase): + """ + Base for a heart-beat capable MatchMaker. + Provides common methods for registering, + unregistering, and maintaining heartbeats. + """ + def __init__(self): + self.hosts = set() + self._heart = None + self.host_topic = {} + + super(HeartbeatMatchMakerBase, self).__init__() + + def send_heartbeats(self): + """ + Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, + which loops this method. + """ + for key, host in self.host_topic: + self.ack_alive(key, host) + + def ack_alive(self, key, host): + """ + Acknowledge that a host.topic is alive. 
+ Used internally for updating heartbeats, + but may also be used publically to acknowledge + a system is alive (i.e. rpc message successfully + sent to host) + """ + raise NotImplementedError("Must implement ack_alive") + + def backend_register(self, key, host): + """ + Implements registration logic. + Called by register(self,key,host) + """ + raise NotImplementedError("Must implement backend_register") + + def backend_unregister(self, key, key_host): + """ + Implements de-registration logic. + Called by unregister(self,key,host) + """ + raise NotImplementedError("Must implement backend_unregister") + + def register(self, key, host): + """ + Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. + """ + self.hosts.add(host) + self.host_topic[(key, host)] = host + key_host = '.'.join((key, host)) + + self.backend_register(key, key_host) + + self.ack_alive(key, host) + + def unregister(self, key, host): + """ + Unregister a topic. + """ + if (key, host) in self.host_topic: + del self.host_topic[(key, host)] + + self.hosts.discard(host) + self.backend_unregister(key, '.'.join((key, host))) + + LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) + + def start_heartbeat(self): + """ + Implementation of MatchMakerBase.start_heartbeat + Launches greenthread looping send_heartbeats(), + yielding for CONF.matchmaker_heartbeat_freq seconds + between iterations. + """ + if len(self.hosts) == 0: + raise MatchMakerException( + _("Register before starting heartbeat.")) + + def do_heartbeat(): + while True: + self.send_heartbeats() + eventlet.sleep(CONF.matchmaker_heartbeat_freq) + + self._heart = eventlet.spawn(do_heartbeat) + + def stop_heartbeat(self): + """ + Destroys the heartbeat greenthread. + """ + if self._heart: + self._heart.kill() + + +class DirectBinding(Binding): + """ + Specifies a host in the key via a '.' character + Although dots are used in the key, the behavior here is + that it maps directly to a host, thus direct. 
+ """ + def test(self, key): + if '.' in key: + return True + return False + + +class TopicBinding(Binding): + """ + Where a 'bare' key without dots. + AMQP generally considers topic exchanges to be those *with* dots, + but we deviate here in terminology as the behavior here matches + that of a topic exchange (whereas where there are dots, behavior + matches that of a direct exchange. + """ + def test(self, key): + if '.' not in key: + return True + return False + + +class FanoutBinding(Binding): + """Match on fanout keys, where key starts with 'fanout.' string.""" + def test(self, key): + if key.startswith('fanout~'): + return True + return False + + +class StubExchange(Exchange): + """Exchange that does nothing.""" + def run(self, key): + return [(key, None)] + + +class RingExchange(Exchange): + """ + Match Maker where hosts are loaded from a static file containing + a hashmap (JSON formatted). + + __init__ takes optional ring dictionary argument, otherwise + loads the ringfile from CONF.mathcmaker_ringfile. + """ + def __init__(self, ring=None): + super(RingExchange, self).__init__() + + if ring: + self.ring = ring + else: + fh = open(CONF.matchmaker_ringfile, 'r') + self.ring = json.load(fh) + fh.close() + + self.ring0 = {} + for k in self.ring.keys(): + self.ring0[k] = itertools.cycle(self.ring[k]) + + def _ring_has(self, key): + if key in self.ring0: + return True + return False + + +class RoundRobinRingExchange(RingExchange): + """A Topic Exchange based on a hashmap.""" + def __init__(self, ring=None): + super(RoundRobinRingExchange, self).__init__(ring) + + def run(self, key): + if not self._ring_has(key): + LOG.warn( + _("No key defining hosts for topic '%s', " + "see ringfile") % (key, ) + ) + return [] + host = next(self.ring0[key]) + return [(key + '.' 
+ host, host)] + + +class FanoutRingExchange(RingExchange): + """Fanout Exchange based on a hashmap.""" + def __init__(self, ring=None): + super(FanoutRingExchange, self).__init__(ring) + + def run(self, key): + # Assume starts with "fanout~", strip it for lookup. + nkey = key.split('fanout~')[1:][0] + if not self._ring_has(nkey): + LOG.warn( + _("No key defining hosts for topic '%s', " + "see ringfile") % (nkey, ) + ) + return [] + return map(lambda x: (key + '.' + x, x), self.ring[nkey]) + + +class LocalhostExchange(Exchange): + """Exchange where all direct topics are local.""" + def __init__(self, host='localhost'): + self.host = host + super(Exchange, self).__init__() + + def run(self, key): + return [('.'.join((key.split('.')[0], self.host)), self.host)] + + +class DirectExchange(Exchange): + """ + Exchange where all topic keys are split, sending to second half. + i.e. "compute.host" sends a message to "compute.host" running on "host" + """ + def __init__(self): + super(Exchange, self).__init__() + + def run(self, key): + e = key.split('.', 1)[1] + return [(key, e)] + + +class MatchMakerRing(MatchMakerBase): + """ + Match Maker where hosts are loaded from a static hashmap. + """ + def __init__(self, ring=None): + super(MatchMakerRing, self).__init__() + self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) + self.add_binding(DirectBinding(), DirectExchange()) + self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) + + +class MatchMakerLocalhost(MatchMakerBase): + """ + Match Maker where all bare topics resolve to localhost. + Useful for testing. + """ + def __init__(self, host='localhost'): + super(MatchMakerLocalhost, self).__init__() + self.add_binding(FanoutBinding(), LocalhostExchange(host)) + self.add_binding(DirectBinding(), DirectExchange()) + self.add_binding(TopicBinding(), LocalhostExchange(host)) + + +class MatchMakerStub(MatchMakerBase): + """ + Match Maker where topics are untouched. 
+ Useful for testing, or for AMQP/brokered queues. + Will not work where knowledge of hosts is known (i.e. zeromq) + """ + def __init__(self): + super(MatchMakerLocalhost, self).__init__() + + self.add_binding(FanoutBinding(), StubExchange()) + self.add_binding(DirectBinding(), StubExchange()) + self.add_binding(TopicBinding(), StubExchange()) diff --git a/cinder/openstack/common/rpc/matchmaker_redis.py b/cinder/openstack/common/rpc/matchmaker_redis.py new file mode 100644 index 0000000000..87f9fb2956 --- /dev/null +++ b/cinder/openstack/common/rpc/matchmaker_redis.py @@ -0,0 +1,149 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +The MatchMaker classes should accept a Topic or Fanout exchange key and +return keys for direct exchanges, per (approximate) AMQP parlance. +""" + +from oslo.config import cfg + +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import matchmaker as mm_common + +redis = importutils.try_import('redis') + + +matchmaker_redis_opts = [ + cfg.StrOpt('host', + default='127.0.0.1', + help='Host to locate redis'), + cfg.IntOpt('port', + default=6379, + help='Use this port to connect to redis host.'), + cfg.StrOpt('password', + default=None, + help='Password for Redis server. 
(optional)'), +] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='matchmaker_redis', + title='Options for Redis-based MatchMaker') +CONF.register_group(opt_group) +CONF.register_opts(matchmaker_redis_opts, opt_group) +LOG = logging.getLogger(__name__) + + +class RedisExchange(mm_common.Exchange): + def __init__(self, matchmaker): + self.matchmaker = matchmaker + self.redis = matchmaker.redis + super(RedisExchange, self).__init__() + + +class RedisTopicExchange(RedisExchange): + """ + Exchange where all topic keys are split, sending to second half. + i.e. "compute.host" sends a message to "compute" running on "host" + """ + def run(self, topic): + while True: + member_name = self.redis.srandmember(topic) + + if not member_name: + # If this happens, there are no + # longer any members. + break + + if not self.matchmaker.is_alive(topic, member_name): + continue + + host = member_name.split('.', 1)[1] + return [(member_name, host)] + return [] + + +class RedisFanoutExchange(RedisExchange): + """ + Return a list of all hosts. + """ + def run(self, topic): + topic = topic.split('~', 1)[1] + hosts = self.redis.smembers(topic) + good_hosts = filter( + lambda host: self.matchmaker.is_alive(topic, host), hosts) + + return [(x, x.split('.', 1)[1]) for x in good_hosts] + + +class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): + """ + MatchMaker registering and looking-up hosts with a Redis server. 
+ """ + def __init__(self): + super(MatchMakerRedis, self).__init__() + + if not redis: + raise ImportError("Failed to import module redis.") + + self.redis = redis.StrictRedis( + host=CONF.matchmaker_redis.host, + port=CONF.matchmaker_redis.port, + password=CONF.matchmaker_redis.password) + + self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self)) + self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange()) + self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self)) + + def ack_alive(self, key, host): + topic = "%s.%s" % (key, host) + if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl): + # If we could not update the expiration, the key + # might have been pruned. Re-register, creating a new + # key in Redis. + self.register(self.topic_host[host], host) + + def is_alive(self, topic, host): + if self.redis.ttl(host) == -1: + self.expire(topic, host) + return False + return True + + def expire(self, topic, host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.delete(host) + pipe.srem(topic, host) + pipe.execute() + + def backend_register(self, key, key_host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.sadd(key, key_host) + + # No value is needed, we just + # care if it exists. Sets aren't viable + # because only keys can expire. + pipe.set(key_host, '') + + pipe.execute() + + def backend_unregister(self, key, key_host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.srem(key, key_host) + pipe.delete(key_host) + pipe.execute() diff --git a/cinder/openstack/common/rpc/proxy.py b/cinder/openstack/common/rpc/proxy.py new file mode 100644 index 0000000000..4ddc5c936a --- /dev/null +++ b/cinder/openstack/common/rpc/proxy.py @@ -0,0 +1,179 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A helper class for proxy objects to remote APIs. + +For more information about rpc API version numbers, see: + rpc/dispatcher.py +""" + + +from cinder.openstack.common import rpc + + +class RpcProxy(object): + """A helper class for rpc clients. + + This class is a wrapper around the RPC client API. It allows you to + specify the topic and API version in a single place. This is intended to + be used as a base class for a class that implements the client side of an + rpc API. + """ + + def __init__(self, topic, default_version): + """Initialize an RpcProxy. + + :param topic: The topic to use for all messages. + :param default_version: The default API version to request in all + outgoing messages. This can be overridden on a per-message + basis. + """ + self.topic = topic + self.default_version = default_version + super(RpcProxy, self).__init__() + + def _set_version(self, msg, vers): + """Helper method to set the version in a message. + + :param msg: The message having a version added to it. + :param vers: The version number to add to the message. 
+ """ + msg['version'] = vers if vers else self.default_version + + def _get_topic(self, topic): + """Return the topic to use for a message.""" + return topic if topic else self.topic + + @staticmethod + def make_namespaced_msg(method, namespace, **kwargs): + return {'method': method, 'namespace': namespace, 'args': kwargs} + + @staticmethod + def make_msg(method, **kwargs): + return RpcProxy.make_namespaced_msg(method, None, **kwargs) + + def call(self, context, msg, topic=None, version=None, timeout=None): + """rpc.call() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + :param timeout: (Optional) A timeout to use when waiting for the + response. If no timeout is specified, a default timeout will be + used that is usually sufficient. + + :returns: The return value from the remote method. + """ + self._set_version(msg, version) + real_topic = self._get_topic(topic) + try: + return rpc.call(context, real_topic, msg, timeout) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) + + def multicall(self, context, msg, topic=None, version=None, timeout=None): + """rpc.multicall() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + :param timeout: (Optional) A timeout to use when waiting for the + response. If no timeout is specified, a default timeout will be + used that is usually sufficient. + + :returns: An iterator that lets you process each of the returned values + from the remote method as they arrive. 
+ """ + self._set_version(msg, version) + real_topic = self._get_topic(topic) + try: + return rpc.multicall(context, real_topic, msg, timeout) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) + + def cast(self, context, msg, topic=None, version=None): + """rpc.cast() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.cast() does not wait on any return value from the + remote method. + """ + self._set_version(msg, version) + rpc.cast(context, self._get_topic(topic), msg) + + def fanout_cast(self, context, msg, topic=None, version=None): + """rpc.fanout_cast() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.fanout_cast() does not wait on any return value + from the remote method. + """ + self._set_version(msg, version) + rpc.fanout_cast(context, self._get_topic(topic), msg) + + def cast_to_server(self, context, server_params, msg, topic=None, + version=None): + """rpc.cast_to_server() a remote method. + + :param context: The request context + :param server_params: Server parameters. See rpc.cast_to_server() for + details. + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.cast_to_server() does not wait on any + return values. 
+ """ + self._set_version(msg, version) + rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) + + def fanout_cast_to_server(self, context, server_params, msg, topic=None, + version=None): + """rpc.fanout_cast_to_server() a remote method. + + :param context: The request context + :param server_params: Server parameters. See rpc.cast_to_server() for + details. + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.fanout_cast_to_server() does not wait on any + return values. + """ + self._set_version(msg, version) + rpc.fanout_cast_to_server(context, server_params, + self._get_topic(topic), msg) diff --git a/cinder/openstack/common/rpc/service.py b/cinder/openstack/common/rpc/service.py new file mode 100644 index 0000000000..b1f997d38f --- /dev/null +++ b/cinder/openstack/common/rpc/service.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher +from cinder.openstack.common import service + + +LOG = logging.getLogger(__name__) + + +class Service(service.Service): + """Service object for binaries running on hosts. + + A service enables rpc by listening to queues based on topic and host.""" + def __init__(self, host, topic, manager=None): + super(Service, self).__init__() + self.host = host + self.topic = topic + if manager is None: + self.manager = self + else: + self.manager = manager + + def start(self): + super(Service, self).start() + + self.conn = rpc.create_connection(new=True) + LOG.debug(_("Creating Consumer connection for Service %s") % + self.topic) + + dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, dispatcher, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, dispatcher, fanout=False) + + self.conn.create_consumer(self.topic, dispatcher, fanout=True) + + # Hook to allow the manager to do other initializations after + # the rpc connection is created. + if callable(getattr(self.manager, 'initialize_service_hook', None)): + self.manager.initialize_service_hook(self) + + # Consume from all consumers in a thread + self.conn.consume_in_thread() + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. 
as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + super(Service, self).stop() diff --git a/cinder/openstack/common/rpc/zmq_receiver.py b/cinder/openstack/common/rpc/zmq_receiver.py new file mode 100755 index 0000000000..e4c6ee30ed --- /dev/null +++ b/cinder/openstack/common/rpc/zmq_receiver.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +eventlet.monkey_patch() + +import contextlib +import sys + +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import impl_zmq + +CONF = cfg.CONF +CONF.register_opts(rpc.rpc_opts) +CONF.register_opts(impl_zmq.zmq_opts) + + +def main(): + CONF(sys.argv[1:], project='oslo') + logging.setup("oslo") + + with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: + reactor.consume_in_thread() + reactor.wait() diff --git a/cinder/openstack/common/scheduler/__init__.py b/cinder/openstack/common/scheduler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/openstack/common/scheduler/filter.py b/cinder/openstack/common/scheduler/filter.py new file mode 100644 index 0000000000..52c18afa37 --- /dev/null +++ b/cinder/openstack/common/scheduler/filter.py @@ -0,0 +1,71 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. 
class BaseFilter(object):
    """Base class for all filter classes.

    Subclasses normally override only _filter_one(); filter_all() is a
    lazy generator that applies it to each candidate in turn.
    """

    def _filter_one(self, obj, filter_properties):
        """Return True if it passes the filter, False otherwise.
        Override this in a subclass.
        """
        return True

    def filter_all(self, filter_obj_list, filter_properties):
        """Yield objects that pass the filter.

        Can be overriden in a subclass, if you need to base filtering
        decisions on all objects. Otherwise, one can just override
        _filter_one() to filter a single object.
        """
        for candidate in filter_obj_list:
            if not self._filter_one(candidate, filter_properties):
                continue
            yield candidate
+ """ + return (inspect.isclass(obj) and + not obj.__name__.startswith('_') and + issubclass(obj, self.filter_class_type)) + + def get_all_classes(self): + return [x.plugin for x in self.filter_manager + if self._is_correct_class(x.plugin)] + + def get_filtered_objects(self, filter_classes, objs, + filter_properties): + for filter_cls in filter_classes: + objs = filter_cls().filter_all(objs, filter_properties) + return list(objs) diff --git a/cinder/openstack/common/scheduler/filters/__init__.py b/cinder/openstack/common/scheduler/filters/__init__.py new file mode 100644 index 0000000000..40bf096a41 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduler host filters +""" + +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import filter + +LOG = logging.getLogger(__name__) + + +class BaseHostFilter(filter.BaseFilter): + """Base class for host filters.""" + def _filter_one(self, obj, filter_properties): + """Return True if the object passes the filter, otherwise False.""" + return self.host_passes(obj, filter_properties) + + def host_passes(self, host_state, filter_properties): + """Return True if the HostState passes the filter, otherwise False. + Override this in a subclass. 
+ """ + raise NotImplementedError() + + +class HostFilterHandler(filter.BaseFilterHandler): + def __init__(self, namespace): + super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) diff --git a/cinder/openstack/common/scheduler/filters/availability_zone_filter.py b/cinder/openstack/common/scheduler/filters/availability_zone_filter.py new file mode 100644 index 0000000000..0c3ca1ef74 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/availability_zone_filter.py @@ -0,0 +1,30 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.openstack.common.scheduler import filters + + +class AvailabilityZoneFilter(filters.BaseHostFilter): + """Filters Hosts by availability zone.""" + + def host_passes(self, host_state, filter_properties): + spec = filter_properties.get('request_spec', {}) + props = spec.get('resource_properties', []) + availability_zone = props.get('availability_zone') + + if availability_zone: + return availability_zone == host_state.service['availability_zone'] + return True diff --git a/cinder/openstack/common/scheduler/filters/capabilities_filter.py b/cinder/openstack/common/scheduler/filters/capabilities_filter.py new file mode 100644 index 0000000000..ae859e927a --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/capabilities_filter.py @@ -0,0 +1,63 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. 
class CapabilitiesFilter(filters.BaseHostFilter):
    """HostFilter to work with resource (instance & volume) type records."""

    def _satisfies_extra_specs(self, capabilities, resource_type):
        """Check that the capabilities provided by the services
        satisfy the extra specs associated with the instance type."""
        extra_specs = resource_type.get('extra_specs', [])
        if not extra_specs:
            return True

        for key, req in extra_specs.iteritems():
            # NOTE(review): dict.iteritems() is Python 2 only.
            # Either not scope format, or in capabilities scope
            scope = key.split(':')
            if len(scope) > 1 and scope[0] != "capabilities":
                # Scoped to some other filter's namespace; not ours to check.
                continue
            elif scope[0] == "capabilities":
                del scope[0]

            # Walk the (possibly nested) capabilities dict following the
            # remaining scope components.
            cap = capabilities
            for index in range(0, len(scope)):
                try:
                    cap = cap.get(scope[index], None)
                except AttributeError:
                    # Intermediate value was not dict-like: cannot match.
                    return False
                if cap is None:
                    return False
            if not extra_specs_ops.match(cap, req):
                return False
        return True

    def host_passes(self, host_state, filter_properties):
        """Return a list of hosts that can create instance_type."""
        # Note(zhiteng) Currently only Cinder and Nova are using
        # this filter, so the resource type is either instance or
        # volume.
        # NOTE(review): assumes filter_properties always carries a
        # dict-like 'resource_type'; if absent, resource_type is None and
        # _satisfies_extra_specs would raise -- TODO confirm with callers.
        resource_type = filter_properties.get('resource_type')
        if not self._satisfies_extra_specs(host_state.capabilities,
                                           resource_type):
            return False
        return True
# 1. The following operations are supported:
#    =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
#    it is ignored.
# Bug fix: the angle-bracket operator tokens ('<in>', '<is>', '<or>') had
# been stripped, leaving three identical '' keys (which collapse into one
# dict entry) and a dead '<or>' branch in match().  Restored per the
# upstream oslo/nova extra_specs_ops table.
_op_methods = {
    # NOTE: for historical reasons plain '=' means "greater than or
    # equal to" when both sides parse as numbers.
    '=': lambda x, y: float(x) >= float(y),
    '<in>': lambda x, y: y in x,
    '<is>': lambda x, y: (strutils.bool_from_string(x) is
                          strutils.bool_from_string(y)),
    '==': lambda x, y: float(x) == float(y),
    '!=': lambda x, y: float(x) != float(y),
    '>=': lambda x, y: float(x) >= float(y),
    '<=': lambda x, y: float(x) <= float(y),
    's==': operator.eq,
    's!=': operator.ne,
    's<': operator.lt,
    's<=': operator.le,
    's>': operator.gt,
    's>=': operator.ge}


def match(value, req):
    """Return True when `value` satisfies the extra-spec requirement `req`.

    :param value: the capability value reported by a host.
    :param req: requirement string, e.g. '= 1024', 's== foo',
        '<or> v1 <or> v2', or a plain value for simple equality.
    :returns: True on match, False otherwise (including on numeric
        conversion failure).
    """
    words = req.split()

    op = method = None
    if words:
        op = words.pop(0)
        method = _op_methods.get(op)

    # An unrecognized leading token means req is a plain value:
    # fall back to simple equality.
    if op != '<or>' and not method:
        return value == req

    if value is None:
        return False

    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        while True:
            if words.pop(0) == value:
                return True
            if not words:
                break
            op = words.pop(0)  # remove a keyword <or>
            if not words:
                break
        return False

    try:
        if words and method(value, words[0]):
            return True
    except ValueError:
        # Numeric operator applied to non-numeric input: no match.
        pass

    return False
class JsonFilter(filters.BaseHostFilter):
    """Host Filter to allow simple JSON-based grammar for
    selecting hosts.
    """
    def _op_compare(self, args, op):
        """Returns True if the specified operator can successfully
        compare the first item in the args with all the rest. Will
        return False if only one item is in the list.
        """
        if len(args) < 2:
            return False
        if op is operator.contains:
            # Membership: first arg must appear among the remaining args.
            bad = args[0] not in args[1:]
        else:
            # Any failing comparison makes the whole test fail.
            bad = [arg for arg in args[1:]
                   if not op(args[0], arg)]
        return not bool(bad)

    def _equals(self, args):
        """First term is == all the other terms."""
        return self._op_compare(args, operator.eq)

    def _less_than(self, args):
        """First term is < all the other terms."""
        return self._op_compare(args, operator.lt)

    def _greater_than(self, args):
        """First term is > all the other terms."""
        return self._op_compare(args, operator.gt)

    def _in(self, args):
        """First term is in set of remaining terms"""
        return self._op_compare(args, operator.contains)

    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        return self._op_compare(args, operator.le)

    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        return self._op_compare(args, operator.ge)

    def _not(self, args):
        """Flip each of the arguments."""
        return [not arg for arg in args]

    def _or(self, args):
        """True if any arg is True."""
        return any(args)

    def _and(self, args):
        """True if all args are True."""
        return all(args)

    # Maps query operator tokens to the plain (class-body) functions above.
    # The values are unbound, which is why _process_filter calls them as
    # method(self, cooked_args).
    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }

    def _parse_string(self, string, host_state):
        """Strings prefixed with $ are capability lookups in the
        form '$variable' where 'variable' is an attribute in the
        HostState class. If $variable is a dictionary, you may
        use: $variable.dictkey
        """
        if not string:
            return None
        if not string.startswith("$"):
            return string

        # Resolve '$attr.key1.key2' against host_state; any missing link
        # yields None.
        path = string[1:].split(".")
        obj = getattr(host_state, path[0], None)
        if obj is None:
            return None
        for item in path[1:]:
            obj = obj.get(item, None)
            if obj is None:
                return None
        return obj

    def _process_filter(self, query, host_state):
        """Recursively parse the query structure."""
        if not query:
            return True
        cmd = query[0]
        method = self.commands[cmd]
        cooked_args = []
        for arg in query[1:]:
            if isinstance(arg, list):
                # Nested sub-query: evaluate it first.
                arg = self._process_filter(arg, host_state)
            elif isinstance(arg, basestring):
                # NOTE(review): `basestring` is Python 2 only.
                arg = self._parse_string(arg, host_state)
            if arg is not None:
                cooked_args.append(arg)
        result = method(self, cooked_args)
        return result

    def host_passes(self, host_state, filter_properties):
        """Return a list of hosts that can fulfill the requirements
        specified in the query.
        """
        # TODO(zhiteng) Add description for filter_properties structure
        # and scheduler_hints.
        try:
            query = filter_properties['scheduler_hints']['query']
        except KeyError:
            query = None
        if not query:
            return True

        # NOTE(comstud): Not checking capabilities or service for
        # enabled/disabled so that a provided json filter can decide

        result = self._process_filter(jsonutils.loads(query), host_state)
        if isinstance(result, list):
            # If any succeeded, include the host
            result = any(result)
        if result:
            # Filter it out.
            return True
        return False
class WeighedObject(object):
    """Object with weight information."""
    def __init__(self, obj, weight):
        # obj: the wrapped object being weighed; weight: its running score.
        self.obj = obj
        self.weight = weight

    def __repr__(self):
        # Bug fix: the format string had been lost (it read `"" % (...)`,
        # which raises TypeError for the two unconsumed arguments).
        # Restored to the conventional upstream form.
        return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
+ """ + constant = self._weight_multiplier() + for obj in weighed_obj_list: + obj.weight += (constant * + self._weigh_object(obj.obj, weight_properties)) + + +class BaseWeightHandler(object): + object_class = WeighedObject + + def __init__(self, weighed_object_type, weight_namespace): + self.namespace = weight_namespace + self.weighed_object_type = weighed_object_type + self.weight_manager = extension.ExtensionManager(weight_namespace) + + def _is_correct_class(self, obj): + """Return whether an object is a class of the correct type and + is not prefixed with an underscore. + """ + return (inspect.isclass(obj) and + not obj.__name__.startswith('_') and + issubclass(obj, self.weighed_object_type)) + + def get_all_classes(self): + return [x.plugin for x in self.weight_manager + if self._is_correct_class(x.plugin)] + + def get_weighed_objects(self, weigher_classes, obj_list, + weighing_properties): + """Return a sorted (highest score first) list of WeighedObjects.""" + + if not obj_list: + return [] + + weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] + for weigher_cls in weigher_classes: + weigher = weigher_cls() + weigher.weigh_objects(weighed_objs, weighing_properties) + + return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) diff --git a/cinder/openstack/common/scheduler/weights/__init__.py b/cinder/openstack/common/scheduler/weights/__init__.py new file mode 100644 index 0000000000..a2743577d7 --- /dev/null +++ b/cinder/openstack/common/scheduler/weights/__init__.py @@ -0,0 +1,45 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
class WeighedHost(weight.WeighedObject):
    """WeighedObject whose wrapped object is a scheduler host state."""

    def to_dict(self):
        # Compact, serializable summary of this weighed host.
        return {
            'weight': self.weight,
            'host': self.obj.host,
        }

    def __repr__(self):
        return ("WeighedHost [host: %s, weight: %s]" %
                (self.obj.host, self.weight))


class BaseHostWeigher(weight.BaseWeigher):
    """Base class for host weights."""
    pass


class HostWeightHandler(weight.BaseWeightHandler):
    # Wrap each host in a WeighedHost rather than the generic class.
    object_class = WeighedHost

    def __init__(self, namespace):
        # Only BaseHostWeigher subclasses are loaded from the namespace.
        super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Create the thread group backing this launcher.

        :returns: None

        """
        self._services = threadgroup.ThreadGroup()
        eventlet_backdoor.initialize_if_enabled()

    @staticmethod
    def run_service(service):
        """Start a service and block until it finishes.

        :param service: service to run and wait for.
        :returns: None

        """
        service.start()
        service.wait()

    def launch_service(self, service):
        """Run the given service in a thread of this launcher's group.

        :param service: The service you would like to start.
        :returns: None

        """
        self._services.add_thread(self.run_service, service)

    def stop(self):
        """Stop every service this launcher is running.

        :returns: None

        """
        self._services.stop()

    def wait(self):
        """Block until all launched services have stopped.

        :returns: None

        """
        self._services.wait()
class SignalExit(SystemExit):
    # SystemExit raised in response to a caught POSIX signal; carries the
    # signal number so callers can report what triggered the shutdown.
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


class ServiceLauncher(Launcher):
    # Launcher that converts SIGTERM/SIGINT into a clean shutdown.
    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        raise SignalExit(signo)

    def wait(self):
        """Wait for services; returns the exit status (or None)."""
        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        status = None
        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
        except SystemExit as exc:
            status = exc.code
        finally:
            # rpc is optional (importutils.try_import at module level);
            # only clean it up when it actually loaded.
            if rpc:
                rpc.cleanup()
            self.stop()
        return status


class ServiceWrapper(object):
    # Bookkeeping record for one service run by ProcessLauncher.
    def __init__(self, service, workers):
        self.service = service
        self.workers = workers    # desired number of child processes
        self.children = set()     # pids of live children
        self.forktimes = []       # recent fork timestamps (rate limiting)
    def _child_process(self, service):
        """Run `service` in the freshly forked child process."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, _sigterm)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.run_service(service)

    def _start_child(self, wrap):
        """Fork one worker for `wrap`; return the child pid (parent side)."""
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            status = 0
            try:
                self._child_process(wrap.service)
            except SignalExit as exc:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT'}[exc.signo]
                LOG.info(_('Caught %s, exiting'), signame)
                status = exc.code
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                LOG.exception(_('Unhandled exception'))
                status = 2
            finally:
                wrap.service.stop()

            # _exit skips atexit handlers and stdio flushing inherited
            # from the parent process.
            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Start `workers` child processes all running `service`."""
        wrap = ServiceWrapper(service, workers)

        LOG.info(_('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child; return its ServiceWrapper, or None."""
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        # Green thread pool used to run this service's work.
        self.tg = threadgroup.ThreadGroup(threads)

    def start(self):
        # Subclasses override this with their startup work.
        pass

    def stop(self):
        self.tg.stop()

    def wait(self):
        self.tg.wait()


def launch(service, workers=None):
    """Launch `service`, forking worker processes when requested.

    :param service: the Service instance to run.
    :param workers: number of worker processes; falsy runs in-process
        via ServiceLauncher instead of forking.
    :returns: the Launcher now responsible for the service.
    """
    if workers:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)
    else:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    return launcher
+""" + +import sys + +from cinder.openstack.common.gettextutils import _ + + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + + +def int_from_bool_as_string(subject): + """ + Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False): + """ + Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else is considered False. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, basestring): + subject = str(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return False + + +def safe_decode(text, incoming=None, errors='strict'): + """ + Decodes incoming str using `incoming` if they're + not already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. 
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """
    Encodes incoming str/unicode using `encoding`. If
    incoming is not specified, text is expected to
    be encoded with current python's default encoding.
    (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of basestring
    """
    # NOTE(review): `basestring`/`unicode` are Python 2 builtins.
    if not isinstance(text, basestring):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, unicode):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)

    return text
+ :raises TypeError: If text is not an instance of basestring + """ + if not isinstance(text, basestring): + raise TypeError("%s can't be encoded" % type(text)) + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + if isinstance(text, unicode): + return text.encode(encoding, errors) + elif text and encoding != incoming: + # Decode text before encoding it with `encoding` + text = safe_decode(text, incoming, errors) + return text.encode(encoding, errors) + + return text diff --git a/cinder/openstack/common/threadgroup.py b/cinder/openstack/common/threadgroup.py new file mode 100644 index 0000000000..5d6ec006b9 --- /dev/null +++ b/cinder/openstack/common/threadgroup.py @@ -0,0 +1,114 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from eventlet import greenlet +from eventlet import greenpool +from eventlet import greenthread + +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """ Callback function to be passed to GreenThread.link() when we spawn() + Calls the :class:`ThreadGroup` to notify it. + + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """ Wrapper around a greenthread, that holds a reference to the + :class:`ThreadGroup`. 
The Thread will notify the :class:`ThreadGroup` when + it has finished so it can be removed from the threads list. + """ + def __init__(self, thread, group): + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + +class ThreadGroup(object): + """ The point of the ThreadGroup class is to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers. + """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + + def thread_done(self, thread): + self.threads.remove(thread) + + def stop(self): + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + # don't kill the current thread. 
+ continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + continue + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/cinder/openstack/common/timeutils.py b/cinder/openstack/common/timeutils.py new file mode 100644 index 0000000000..6094365907 --- /dev/null +++ b/cinder/openstack/common/timeutils.py @@ -0,0 +1,186 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
+""" + +import calendar +import datetime + +import iso8601 + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(e.message) + except TypeError as e: + raise ValueError(e.message) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, basestring): + before = parse_strtime(before).replace(tzinfo=None) + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, basestring): + after = parse_strtime(after).replace(tzinfo=None) + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable 
version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns an iso8601 formatted date from timestamp""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=datetime.datetime.utcnow()): + """ + Override utils.utcnow to return a constant time or a list thereof, + one at a time. + """ + utcnow.override_time = override_time + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times.""" + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """ + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). 
+ """ + delta = after - before + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """ + Determines if time is going to happen in the next window seconds. + + :params dt: the time + :params window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/cinder/openstack/common/uuidutils.py b/cinder/openstack/common/uuidutils.py new file mode 100644 index 0000000000..7608acb942 --- /dev/null +++ b/cinder/openstack/common/uuidutils.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +UUID related utilities and helper functions. +""" + +import uuid + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. 
+ + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False diff --git a/cinder/policy.py b/cinder/policy.py new file mode 100644 index 0000000000..9b2b4defd1 --- /dev/null +++ b/cinder/policy.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Policy Engine For Cinder""" + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import policy +from cinder import utils + +policy_opts = [ + cfg.StrOpt('policy_file', + default='policy.json', + help=_('JSON file representing policy')), + cfg.StrOpt('policy_default_rule', + default='default', + help=_('Rule checked when requested rule is not found')), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(policy_opts) + +_POLICY_PATH = None +_POLICY_CACHE = {} + + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = utils.find_config(FLAGS.policy_file) + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_brain) + + +def _set_brain(data): + default_rule = FLAGS.policy_default_rule + policy.set_brain(policy.HttpBrain.load_json(data, default_rule)) + + +def enforce(context, action, target): + """Verifies that the action is valid on the target in this context. + + :param context: cinder context + :param action: string representing the action to be checked + this should be colon separated for clarity. + i.e. ``compute:create_instance``, + ``compute:attach_volume``, + ``volume:attach_volume`` + + :param object: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + + :raises cinder.exception.PolicyNotAuthorized: if verification fails. + + """ + init() + + match_list = ('rule:%s' % action,) + credentials = context.to_dict() + + policy.enforce(match_list, target, credentials, + exception.PolicyNotAuthorized, action=action) + + +def check_is_admin(roles): + """Whether or not roles contains 'admin' role according to policy setting. 
+ + """ + init() + + action = 'context_is_admin' + match_list = ('rule:%s' % action,) + # include project_id on target to avoid KeyError if context_is_admin + # policy definition is missing, and default admin_or_owner rule + # attempts to apply. Since our credentials dict does not include a + # project_id, this target can never match as a generic rule. + target = {'project_id': ''} + credentials = {'roles': roles} + + return policy.enforce(match_list, target, credentials) diff --git a/cinder/quota.py b/cinder/quota.py new file mode 100644 index 0000000000..59868d33d2 --- /dev/null +++ b/cinder/quota.py @@ -0,0 +1,813 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Quotas for volumes.""" + +import datetime + +from oslo.config import cfg + +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + +quota_opts = [ + cfg.IntOpt('quota_volumes', + default=10, + help='number of volumes allowed per project'), + cfg.IntOpt('quota_snapshots', + default=10, + help='number of volume snapshots allowed per project'), + cfg.IntOpt('quota_gigabytes', + default=1000, + help='number of volume gigabytes (snapshots are also included) ' + 'allowed per project'), + cfg.IntOpt('reservation_expire', + default=86400, + help='number of seconds until a reservation expires'), + cfg.IntOpt('until_refresh', + default=0, + help='count of reservations until usage is refreshed'), + cfg.IntOpt('max_age', + default=0, + help='number of seconds between subsequent usage refreshes'), + cfg.StrOpt('quota_driver', + default='cinder.quota.DbQuotaDriver', + help='default driver to use for quota checks'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(quota_opts) + + +class DbQuotaDriver(object): + """ + Driver to perform necessary checks to enforce quotas and obtain + quota information. The default driver utilizes the local + database. + """ + + def get_by_project(self, context, project_id, resource): + """Get a specific quota by project.""" + + return db.quota_get(context, project_id, resource) + + def get_by_class(self, context, quota_class, resource): + """Get a specific quota by quota class.""" + + return db.quota_class_get(context, quota_class, resource) + + def get_defaults(self, context, resources): + """Given a list of resources, retrieve the default quotas. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. 
+ """ + + quotas = {} + for resource in resources.values(): + quotas[resource.name] = resource.default + + return quotas + + def get_class_quotas(self, context, resources, quota_class, + defaults=True): + """ + Given a list of resources, retrieve the quotas for the given + quota class. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param quota_class: The name of the quota class to return + quotas for. + :param defaults: If True, the default value will be reported + if there is no specific value for the + resource. + """ + + quotas = {} + class_quotas = db.quota_class_get_all_by_name(context, quota_class) + for resource in resources.values(): + if defaults or resource.name in class_quotas: + quotas[resource.name] = class_quotas.get(resource.name, + resource.default) + + return quotas + + def get_project_quotas(self, context, resources, project_id, + quota_class=None, defaults=True, + usages=True): + """ + Given a list of resources, retrieve the quotas for the given + project. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param project_id: The ID of the project to return quotas for. + :param quota_class: If project_id != context.project_id, the + quota class cannot be determined. This + parameter allows it to be specified. It + will be ignored if project_id == + context.project_id. + :param defaults: If True, the quota class value (or the + default value, if there is no value from the + quota class) will be reported if there is no + specific value for the resource. + :param usages: If True, the current in_use and reserved counts + will also be returned. + """ + + quotas = {} + project_quotas = db.quota_get_all_by_project(context, project_id) + if usages: + project_usages = db.quota_usage_get_all_by_project(context, + project_id) + + # Get the quotas for the appropriate class. 
If the project ID + # matches the one in the context, we use the quota_class from + # the context, otherwise, we use the provided quota_class (if + # any) + if project_id == context.project_id: + quota_class = context.quota_class + if quota_class: + class_quotas = db.quota_class_get_all_by_name(context, quota_class) + else: + class_quotas = {} + + for resource in resources.values(): + # Omit default/quota class values + if not defaults and resource.name not in project_quotas: + continue + + quotas[resource.name] = dict( + limit=project_quotas.get(resource.name, + class_quotas.get(resource.name, + resource.default)), ) + + # Include usages if desired. This is optional because one + # internal consumer of this interface wants to access the + # usages directly from inside a transaction. + if usages: + usage = project_usages.get(resource.name, {}) + quotas[resource.name].update( + in_use=usage.get('in_use', 0), + reserved=usage.get('reserved', 0), ) + + return quotas + + def _get_quotas(self, context, resources, keys, has_sync, project_id=None): + """ + A helper method which retrieves the quotas for the specific + resources identified by keys, and which apply to the current + context. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param keys: A list of the desired quotas to retrieve. + :param has_sync: If True, indicates that the resource must + have a sync attribute; if False, indicates + that the resource must NOT have a sync + attribute. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + # Filter resources + if has_sync: + sync_filt = lambda x: hasattr(x, 'sync') + else: + sync_filt = lambda x: not hasattr(x, 'sync') + desired = set(keys) + sub_resources = dict((k, v) for k, v in resources.items() + if k in desired and sync_filt(v)) + + # Make sure we accounted for all of them... 
+ if len(keys) != len(sub_resources): + unknown = desired - set(sub_resources.keys()) + raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) + + # Grab and return the quotas (without usages) + quotas = self.get_project_quotas(context, sub_resources, + project_id, + context.quota_class, usages=False) + + return dict((k, v['limit']) for k, v in quotas.items()) + + def limit_check(self, context, resources, values, project_id=None): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param values: A dictionary of the values to check against the + quota. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. 
+ """ + + # Ensure no value is less than zero + unders = [key for key, val in values.items() if val < 0] + if unders: + raise exception.InvalidQuotaValue(unders=sorted(unders)) + + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + # Get the applicable quotas + quotas = self._get_quotas(context, resources, values.keys(), + has_sync=False, project_id=project_id) + # Check the quotas and construct a list of the resources that + # would be put over limit by the desired values + overs = [key for key, val in values.items() + if quotas[key] >= 0 and quotas[key] < val] + if overs: + raise exception.OverQuota(overs=sorted(overs), quotas=quotas, + usages={}) + + def reserve(self, context, resources, deltas, expire=None, + project_id=None): + """Check quotas and reserve resources. + + For counting quotas--those quotas for which there is a usage + synchronization function--this method checks quotas against + current usage and the desired deltas. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it does not have a usage + synchronization function. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns a + list of reservation UUIDs which were created. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param deltas: A dictionary of the proposed delta changes. + :param expire: An optional parameter specifying an expiration + time for the reservations. If it is a simple + number, it is interpreted as a number of + seconds and added to the current time; if it is + a datetime.timedelta object, it will also be + added to the current time. A datetime.datetime + object will be interpreted as the absolute + expiration time. 
If None is specified, the + default expiration time set by + --default-reservation-expire will be used (this + value will be treated as a number of seconds). + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + # Set up the reservation expiration + if expire is None: + expire = FLAGS.reservation_expire + if isinstance(expire, (int, long)): + expire = datetime.timedelta(seconds=expire) + if isinstance(expire, datetime.timedelta): + expire = timeutils.utcnow() + expire + if not isinstance(expire, datetime.datetime): + raise exception.InvalidReservationExpiration(expire=expire) + + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + # Get the applicable quotas. + # NOTE(Vek): We're not worried about races at this point. + # Yes, the admin may be in the process of reducing + # quotas, but that's a pretty rare thing. + quotas = self._get_quotas(context, resources, deltas.keys(), + has_sync=True, project_id=project_id) + + # NOTE(Vek): Most of the work here has to be done in the DB + # API, because we have to do it in a transaction, + # which means access to the session. Since the + # session isn't available outside the DBAPI, we + # have to do the work there. + return db.quota_reserve(context, resources, quotas, deltas, expire, + FLAGS.until_refresh, FLAGS.max_age, + project_id=project_id) + + def commit(self, context, reservations, project_id=None): + """Commit reservations. + + :param context: The request context, for access checks. + :param reservations: A list of the reservation UUIDs, as + returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. 
+ """ + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + db.reservation_commit(context, reservations, project_id=project_id) + + def rollback(self, context, reservations, project_id=None): + """Roll back reservations. + + :param context: The request context, for access checks. + :param reservations: A list of the reservation UUIDs, as + returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + db.reservation_rollback(context, reservations, project_id=project_id) + + def destroy_all_by_project(self, context, project_id): + """ + Destroy all quotas, usages, and reservations associated with a + project. + + :param context: The request context, for access checks. + :param project_id: The ID of the project being deleted. + """ + + db.quota_destroy_all_by_project(context, project_id) + + def expire(self, context): + """Expire reservations. + + Explores all currently existing reservations and rolls back + any that have expired. + + :param context: The request context, for access checks. + """ + + db.reservation_expire(context) + + +class BaseResource(object): + """Describe a single resource for quota checking.""" + + def __init__(self, name, flag=None): + """ + Initializes a Resource. + + :param name: The name of the resource, i.e., "volumes". + :param flag: The name of the flag or configuration option + which specifies the default value of the quota + for this resource. + """ + + self.name = name + self.flag = flag + + def quota(self, driver, context, **kwargs): + """ + Given a driver and context, obtain the quota for this + resource. + + :param driver: A quota driver. + :param context: The request context. 
+ :param project_id: The project to obtain the quota value for. + If not provided, it is taken from the + context. If it is given as None, no + project-specific quota will be searched + for. + :param quota_class: The quota class corresponding to the + project, or for which the quota is to be + looked up. If not provided, it is taken + from the context. If it is given as None, + no quota class-specific quota will be + searched for. Note that the quota class + defaults to the value in the context, + which may not correspond to the project if + project_id is not the same as the one in + the context. + """ + + # Get the project ID + project_id = kwargs.get('project_id', context.project_id) + + # Ditto for the quota class + quota_class = kwargs.get('quota_class', context.quota_class) + + # Look up the quota for the project + if project_id: + try: + return driver.get_by_project(context, project_id, self.name) + except exception.ProjectQuotaNotFound: + pass + + # Try for the quota class + if quota_class: + try: + return driver.get_by_class(context, quota_class, self.name) + except exception.QuotaClassNotFound: + pass + + # OK, return the default + return self.default + + @property + def default(self): + """Return the default value of the quota.""" + + return FLAGS[self.flag] if self.flag else -1 + + +class ReservableResource(BaseResource): + """Describe a reservable resource.""" + + def __init__(self, name, sync, flag=None): + """ + Initializes a ReservableResource. + + Reservable resources are those resources which directly + correspond to objects in the database, i.e., volumes, gigabytes, + etc. A ReservableResource must be constructed with a usage + synchronization function, which will be called to determine the + current counts of one or more resources. + + The usage synchronization function will be passed three + arguments: an admin context, the project ID, and an opaque + session object, which should in turn be passed to the + underlying database function. 
Synchronization functions + should return a dictionary mapping resource names to the + current in_use count for those resources; more than one + resource and resource count may be returned. Note that + synchronization functions may be associated with more than one + ReservableResource. + + :param name: The name of the resource, i.e., "volumes". + :param sync: A callable which returns a dictionary to + resynchronize the in_use count for one or more + resources, as described above. + :param flag: The name of the flag or configuration option + which specifies the default value of the quota + for this resource. + """ + + super(ReservableResource, self).__init__(name, flag=flag) + self.sync = sync + + +class AbsoluteResource(BaseResource): + """Describe a non-reservable resource.""" + + pass + + +class CountableResource(AbsoluteResource): + """ + Describe a resource where the counts aren't based solely on the + project ID. + """ + + def __init__(self, name, count, flag=None): + """ + Initializes a CountableResource. + + Countable resources are those resources which directly + correspond to objects in the database, i.e., volumes, gigabytes, + etc., but for which a count by project ID is inappropriate. A + CountableResource must be constructed with a counting + function, which will be called to determine the current counts + of the resource. + + The counting function will be passed the context, along with + the extra positional and keyword arguments that are passed to + Quota.count(). It should return an integer specifying the + count. + + Note that this counting is not performed in a transaction-safe + manner. This resource class is a temporary measure to provide + required functionality, until a better approach to solving + this problem can be evolved. + + :param name: The name of the resource, i.e., "volumes". + :param count: A callable which returns the count of the + resource. The arguments passed are as described + above. 
class QuotaEngine(object):
    """Represent the set of recognized quotas."""

    def __init__(self, quota_driver_class=None):
        """Initialize a Quota object, importing the driver if needed."""

        if not quota_driver_class:
            quota_driver_class = FLAGS.quota_driver

        if isinstance(quota_driver_class, basestring):
            quota_driver_class = importutils.import_object(quota_driver_class)

        self._resources = {}
        self._driver = quota_driver_class

    def __contains__(self, resource):
        return resource in self._resources

    def register_resource(self, resource):
        """Register a single resource, keyed by its name."""

        self._resources[resource.name] = resource

    def register_resources(self, resources):
        """Register each resource in an iterable of resources."""

        for res in resources:
            self.register_resource(res)

    def get_by_project(self, context, project_id, resource):
        """Get a specific quota by project."""

        return self._driver.get_by_project(context, project_id, resource)

    def get_by_class(self, context, quota_class, resource):
        """Get a specific quota by quota class."""

        return self._driver.get_by_class(context, quota_class, resource)

    def get_defaults(self, context):
        """Retrieve the default quotas for all registered resources.

        :param context: The request context, for access checks.
        """

        return self._driver.get_defaults(context, self._resources)

    def get_class_quotas(self, context, quota_class, defaults=True):
        """Retrieve the quotas defined for a quota class.

        :param context: The request context, for access checks.
        :param quota_class: The name of the quota class to return
                            quotas for.
        :param defaults: If True, the default value is reported for
                         any resource the class does not override.
        """

        return self._driver.get_class_quotas(context, self._resources,
                                             quota_class, defaults=defaults)

    def get_project_quotas(self, context, project_id, quota_class=None,
                           defaults=True, usages=True):
        """Retrieve the quotas for a project.

        :param context: The request context, for access checks.
        :param project_id: The ID of the project to return quotas for.
        :param quota_class: If project_id != context.project_id, the
                            quota class cannot be determined; this
                            parameter allows it to be specified.
        :param defaults: If True, the quota class value (or the
                         default value, if the class has none) is
                         reported for resources with no specific value.
        :param usages: If True, the current in_use and reserved counts
                       will also be returned.
        """

        return self._driver.get_project_quotas(context, self._resources,
                                               project_id,
                                               quota_class=quota_class,
                                               defaults=defaults,
                                               usages=usages)

    def count(self, context, resource, *args, **kwargs):
        """Count a resource via its declared count() function.

        Arguments following the context and resource are passed
        directly to the counting function declared by the resource.

        :param context: The request context, for access checks.
        :param resource: The name of the resource, as a string.
        :raises: exception.QuotaResourceUnknown if the resource is not
                 registered or has no counting function.
        """

        res = self._resources.get(resource)
        if not res or not hasattr(res, 'count'):
            raise exception.QuotaResourceUnknown(unknown=[resource])

        return res.count(context, *args, **kwargs)

    def limit_check(self, context, project_id=None, **values):
        """Check simple quota limits.

        For limits--quotas with no usage synchronization function--this
        checks that the proposed values (given as keyword arguments,
        keyed by quota name) are permitted.

        Raises QuotaResourceUnknown if a resource is unknown or is not
        a simple limit resource; raises OverQuota (with the sorted list
        of offending resources) if any value exceeds its quota.
        Returns nothing on success.

        :param context: The request context, for access checks.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """

        return self._driver.limit_check(context, self._resources, values,
                                        project_id=project_id)

    def reserve(self, context, expire=None, project_id=None, **deltas):
        """Check quotas and reserve resources.

        For counting quotas--those with a usage synchronization
        function--this checks the requested deltas (keyword arguments)
        against current usage and existing reservations.

        Raises QuotaResourceUnknown for unknown or non-reservable
        resources; raises OverQuota (with the sorted list of offending
        resources) if any delta would exceed its quota.  On success,
        returns a list of created reservation UUIDs.

        :param context: The request context, for access checks.
        :param expire: An optional expiration for the reservations: a
                       plain number is treated as seconds from now, a
                       datetime.timedelta is added to the current time,
                       and a datetime.datetime is taken as the absolute
                       expiration time.  If None, the
                       --default-reservation-expire value is used
                       (treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """

        reservations = self._driver.reserve(context, self._resources, deltas,
                                            expire=expire,
                                            project_id=project_id)

        LOG.debug(_("Created reservations %(reservations)s")
                  % {'reservations': reservations})

        return reservations

    def commit(self, context, reservations, project_id=None):
        """Commit reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """

        try:
            self._driver.commit(context, reservations, project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue.  The exception is
            # logged, however, because this is less than optimal.
            LOG.exception(_("Failed to commit reservations "
                            "%(reservations)s")
                          % {'reservations': reservations})

    def rollback(self, context, reservations, project_id=None):
        """Roll back reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """

        try:
            self._driver.rollback(context, reservations,
                                  project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue.  The exception is
            # logged, however, because this is less than optimal.
            LOG.exception(_("Failed to roll back reservations "
                            "%(reservations)s")
                          % {'reservations': reservations})

    def destroy_all_by_project(self, context, project_id):
        """
        Destroy all quotas, usages, and reservations associated with a
        project.

        :param context: The request context, for access checks.
        :param project_id: The ID of the project being deleted.
        """

        self._driver.destroy_all_by_project(context, project_id)

    def expire(self, context):
        """Expire reservations.

        Explores all currently existing reservations and rolls back
        any that have expired.

        :param context: The request context, for access checks.
        """

        self._driver.expire(context)

    @property
    def resources(self):
        # Sorted list of registered resource names.
        return sorted(self._resources.keys())


def _sync_volumes(context, project_id, session):
    """Resynchronize the in-use volume count for a project."""
    (volumes, _gigs) = db.volume_data_get_for_project(context,
                                                      project_id,
                                                      session=session)
    return {'volumes': volumes}


def _sync_snapshots(context, project_id, session):
    """Resynchronize the in-use snapshot count for a project."""
    (snapshots, _gigs) = db.snapshot_data_get_for_project(context,
                                                          project_id,
                                                          session=session)
    return {'snapshots': snapshots}


def _sync_gigabytes(context, project_id, session):
    """Resynchronize the in-use gigabyte count for a project.

    Snapshot gigabytes are included unless no_snapshot_gb_quota is set.
    """
    (_junk, vol_gigs) = db.volume_data_get_for_project(context,
                                                       project_id,
                                                       session=session)
    if FLAGS.no_snapshot_gb_quota:
        return {'gigabytes': vol_gigs}

    (_junk, snap_gigs) = db.snapshot_data_get_for_project(context,
                                                          project_id,
                                                          session=session)
    return {'gigabytes': vol_gigs + snap_gigs}


QUOTAS = QuotaEngine()


resources = [
    ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
    ReservableResource('snapshots', _sync_snapshots, 'quota_snapshots'),
    ReservableResource('gigabytes', _sync_gigabytes, 'quota_gigabytes'), ]


QUOTAS.register_resources(resources)
b/cinder/scheduler/__init__.py @@ -0,0 +1,27 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.scheduler` -- Scheduler Nodes +===================================================== + +.. automodule:: cinder.scheduler + :platform: Unix + :synopsis: Module that picks a volume node to create a volume. +.. moduleauthor:: Sandy Walsh +.. moduleauthor:: Ed Leafe +.. moduleauthor:: Chris Behrens +""" diff --git a/cinder/scheduler/chance.py b/cinder/scheduler/chance.py new file mode 100644 index 0000000000..0393afca53 --- /dev/null +++ b/cinder/scheduler/chance.py @@ -0,0 +1,86 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class ChanceScheduler(driver.Scheduler):
    """Implements Scheduler as a random node selector."""

    def _filter_hosts(self, request_spec, hosts, **kwargs):
        """Filter a list of hosts based on request_spec.

        Only the 'ignore_hosts' entry of filter_properties is honored
        here; request_spec itself is currently unused.
        """

        filter_properties = kwargs.get('filter_properties', {})
        ignore_hosts = filter_properties.get('ignore_hosts', [])
        hosts = [host for host in hosts if host not in ignore_hosts]
        return hosts

    def _schedule(self, context, topic, request_spec, **kwargs):
        """Picks a host that is up at random.

        :raises: exception.NoValidHost if no service for the topic is
                 up, or if every candidate was filtered out.
        """

        elevated = context.elevated()
        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        hosts = self._filter_hosts(request_spec, hosts, **kwargs)
        if not hosts:
            msg = _("Could not find another host")
            raise exception.NoValidHost(reason=msg)

        # random.choice is the idiomatic equivalent of the manual
        # hosts[int(random.random() * len(hosts))] indexing.
        return random.choice(hosts)

    def schedule_create_volume(self, context, request_spec, filter_properties):
        """Picks a host at random and casts the volume create to it."""
        topic = FLAGS.volume_topic
        host = self._schedule(context, topic, request_spec,
                              filter_properties=filter_properties)
        volume_id = request_spec['volume_id']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']

        updated_volume = driver.volume_update_db(context, volume_id, host)
        self.volume_rpcapi.create_volume(context, updated_volume, host,
                                         snapshot_id, image_id)

    def schedule_create_share(self, context, request_spec, filter_properties):
        """Picks a host at random and casts the share create to it."""
        topic = FLAGS.share_topic
        host = self._schedule(context, topic, request_spec,
                              filter_properties=filter_properties)
        share_id = request_spec['share_id']
        snapshot_id = request_spec['snapshot_id']

        updated_share = driver.share_update_db(context, share_id, host)
        self.share_rpcapi.create_share(context, updated_share, host,
                                       request_spec,
                                       filter_properties,
                                       snapshot_id)
def share_update_db(context, share_id, host):
    '''Assign a host to a share and stamp its scheduled_at time.

    :returns: A Share with the updated fields set properly.
    '''
    values = {'host': host, 'scheduled_at': timeutils.utcnow()}
    return db.share_update(context, share_id, values)


def volume_update_db(context, volume_id, host):
    '''Assign a host to a volume and stamp its scheduled_at time.

    :returns: A Volume with the updated fields set properly.
    '''
    values = {'host': host, 'scheduled_at': timeutils.utcnow()}
    return db.volume_update(context, volume_id, values)


class Scheduler(object):
    """The base class that all Scheduler classes should inherit from."""

    def __init__(self):
        # Host manager class is configurable; share/volume RPC clients
        # are used by subclasses to cast create requests.
        self.host_manager = importutils.import_object(
            FLAGS.scheduler_host_manager)
        self.share_rpcapi = share_rpcapi.ShareAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()

    def get_host_list(self):
        """Get a list of hosts from the HostManager."""
        return self.host_manager.get_host_list()

    def get_service_capabilities(self):
        """Get the normalized set of capabilities for the services."""
        return self.host_manager.get_service_capabilities()

    def update_service_capabilities(self, service_name, host, capabilities):
        """Process a capability update from a service node."""
        self.host_manager.update_service_capabilities(service_name,
                                                      host,
                                                      capabilities)

    def hosts_up(self, context, topic):
        """Return the list of hosts that have a running service for topic."""
        services = db.service_get_all_by_topic(context, topic)
        return [srv['host'] for srv in services if utils.service_is_up(srv)]

    def schedule(self, context, topic, method, *_args, **_kwargs):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_("Must implement a fallback schedule"))

    def schedule_create_volume(self, context, request_spec, filter_properties):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_("Must implement schedule_create_volume"))

    def schedule_create_share(self, context, request_spec, filter_properties):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_("Must implement schedule_create_share"))
class FilterScheduler(driver.Scheduler):
    """Scheduler that can be used for filtering and weighing."""

    def __init__(self, *args, **kwargs):
        super(FilterScheduler, self).__init__(*args, **kwargs)
        self.cost_function_cache = None
        self.options = scheduler_options.SchedulerOptions()
        self.max_attempts = self._max_attempts()

    def schedule(self, context, topic, method, *args, **kwargs):
        """The schedule() contract requires we return the one
        best-suited host for this request.
        """
        # NOTE(review): the result of _schedule() was previously
        # dropped, so schedule() always returned None despite the
        # documented contract; return it.
        return self._schedule(context, topic, *args, **kwargs)

    def _get_configuration_options(self):
        """Fetch options dictionary. Broken out for testing."""
        return self.options.get_configuration()

    def populate_filter_properties(self, request_spec, filter_properties):
        """Stuff things into filter_properties.  Can be overridden in a
        subclass to add more data.
        """
        vol = request_spec['volume_properties']
        filter_properties['size'] = vol['size']
        filter_properties['availability_zone'] = vol.get('availability_zone')
        filter_properties['user_id'] = vol.get('user_id')
        filter_properties['metadata'] = vol.get('metadata')

    def schedule_create_volume(self, context, request_spec, filter_properties):
        """Select the best host for the volume and cast the create to it.

        :raises: exception.NoValidHost if no weighed host was found.
        """
        weighed_host = self._schedule(context, request_spec,
                                      filter_properties)

        if not weighed_host:
            raise exception.NoValidHost(reason="")

        host = weighed_host.obj.host
        volume_id = request_spec['volume_id']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']

        updated_volume = driver.volume_update_db(context, volume_id, host)
        self._post_select_populate_filter_properties(filter_properties,
                                                     weighed_host.obj)

        # context is not serializable
        filter_properties.pop('context', None)

        self.volume_rpcapi.create_volume(context, updated_volume, host,
                                         request_spec=request_spec,
                                         filter_properties=filter_properties,
                                         allow_reschedule=True,
                                         snapshot_id=snapshot_id,
                                         image_id=image_id)

    def _post_select_populate_filter_properties(self, filter_properties,
                                                host_state):
        """Add additional information to the filter properties after a host
        has been selected by the scheduling process.
        """
        # Add a retry entry for the selected volume backend:
        self._add_retry_host(filter_properties, host_state.host)

    def _add_retry_host(self, filter_properties, host):
        """Add a retry entry for the selected volume backend. In the event
        that the request gets re-scheduled, this entry will signal that the
        given backend has already been tried.
        """
        retry = filter_properties.get('retry', None)
        if not retry:
            # Re-scheduling disabled; nothing to record.
            return
        hosts = retry['hosts']
        hosts.append(host)

    def _max_attempts(self):
        """Validate and return the configured scheduler_max_attempts.

        :raises: exception.InvalidParameterValue if the option is < 1.
        """
        max_attempts = FLAGS.scheduler_max_attempts
        if max_attempts < 1:
            msg = _("Invalid value for 'scheduler_max_attempts', "
                    "must be >=1")
            raise exception.InvalidParameterValue(err=msg)
        return max_attempts

    def _log_volume_error(self, volume_id, retry):
        """If the request contained an exception from a previous volume
        create operation, log it to aid debugging.
        """
        exc = retry.pop('exc', None)  # string-ified exception from volume
        if not exc:
            return  # no exception info from a previous attempt, skip

        hosts = retry.get('hosts', None)
        if not hosts:
            return  # no previously attempted hosts, skip

        last_host = hosts[-1]
        msg = _("Error scheduling %(volume_id)s from last vol-service: "
                "%(last_host)s : %(exc)s") % {'volume_id': volume_id,
                                              'last_host': last_host,
                                              'exc': exc}
        LOG.error(msg)

    def _populate_retry(self, filter_properties, properties):
        """Populate filter properties with history of retries for this
        request. If maximum retries is exceeded, raise NoValidHost.
        """
        max_attempts = self.max_attempts
        retry = filter_properties.pop('retry', {})

        if max_attempts == 1:
            # re-scheduling is disabled.
            return

        # retry is enabled, update attempt count:
        if retry:
            retry['num_attempts'] += 1
        else:
            retry = {
                'num_attempts': 1,
                'hosts': []  # list of volume service hosts tried
            }
        filter_properties['retry'] = retry

        volume_id = properties.get('volume_id')
        self._log_volume_error(volume_id, retry)

        if retry['num_attempts'] > max_attempts:
            msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
                    "volume %(volume_id)s") % {'max_attempts': max_attempts,
                                               'volume_id': volume_id}
            raise exception.NoValidHost(reason=msg)

    def _schedule(self, context, request_spec, filter_properties=None):
        """Return the best weighed host for the request, or None if no
        host passed the filters.
        """
        elevated = context.elevated()

        volume_properties = request_spec['volume_properties']
        # Since Cinder is using mixed filters from Oslo and it's own, which
        # takes 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' will make both filters happy.
        resource_properties = volume_properties.copy()
        volume_type = request_spec.get("volume_type", None)
        resource_type = request_spec.get("volume_type", None)
        request_spec.update({'resource_properties': resource_properties})

        config_options = self._get_configuration_options()

        if filter_properties is None:
            filter_properties = {}
        self._populate_retry(filter_properties, resource_properties)

        filter_properties.update({'context': context,
                                  'request_spec': request_spec,
                                  'config_options': config_options,
                                  'volume_type': volume_type,
                                  'resource_type': resource_type})

        self.populate_filter_properties(request_spec,
                                        filter_properties)

        # Find our local list of acceptable hosts by filtering and
        # weighing our options. we virtually consume resources on
        # it so subsequent selections can adjust accordingly.

        # Note: remember, we are using an iterator here. So only
        # traverse this list once.
        hosts = self.host_manager.get_all_host_states(elevated)

        # Filter local hosts based on requirements ...
        hosts = self.host_manager.get_filtered_hosts(hosts,
                                                     filter_properties)
        if not hosts:
            return None

        LOG.debug(_("Filtered %(hosts)s") % {'hosts': hosts})
        # weighted_host = WeightedHost() ... the best host for the job.
        weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
                                                            filter_properties)
        best_host = weighed_hosts[0]
        LOG.debug(_("Choosing %(best_host)s") % {'best_host': best_host})
        best_host.obj.consume_from_volume(volume_properties)
        return best_host

    #NOTE(rushiagr): Methods for scheduling shares

    def schedule_create_share(self, context, request_spec, filter_properties):
        """Select the best host for the share and cast the create to it.

        :raises: exception.NoValidHost if no weighed host was found.
        """
        weighed_host = self._schedule_share(context,
                                            request_spec,
                                            filter_properties)

        if not weighed_host:
            raise exception.NoValidHost(reason="")

        host = weighed_host.obj.host
        share_id = request_spec['share_id']
        snapshot_id = request_spec['snapshot_id']

        updated_share = driver.share_update_db(context, share_id, host)
        self._post_select_populate_filter_properties(filter_properties,
                                                     weighed_host.obj)

        # context is not serializable
        filter_properties.pop('context', None)

        self.share_rpcapi.create_share(context, updated_share, host,
                                       request_spec=request_spec,
                                       filter_properties=filter_properties,
                                       snapshot_id=snapshot_id)

    def _schedule_share(self, context, request_spec, filter_properties=None):
        """Return the best weighed host for the share request, or None
        if no host passed the filters.
        """
        elevated = context.elevated()

        share_properties = request_spec['share_properties']
        # Since Cinder is using mixed filters from Oslo and it's own, which
        # takes 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' will make both filters happy.
        resource_properties = share_properties.copy()
        share_type = request_spec.get("share_type", {})
        resource_type = request_spec.get("share_type", {})
        request_spec.update({'resource_properties': resource_properties})

        config_options = self._get_configuration_options()

        if filter_properties is None:
            filter_properties = {}
        self._populate_retry_share(filter_properties, resource_properties)

        filter_properties.update({'context': context,
                                  'request_spec': request_spec,
                                  'config_options': config_options,
                                  'share_type': share_type,
                                  'resource_type': resource_type
                                  })

        self.populate_filter_properties_share(request_spec, filter_properties)

        # Find our local list of acceptable hosts by filtering and
        # weighing our options. we virtually consume resources on
        # it so subsequent selections can adjust accordingly.

        # Note: remember, we are using an iterator here. So only
        # traverse this list once.
        hosts = self.host_manager.get_all_host_states_share(elevated)

        # Filter local hosts based on requirements ...
        hosts = self.host_manager.get_filtered_hosts(hosts,
                                                     filter_properties)
        if not hosts:
            return None

        LOG.debug(_("Filtered share %(hosts)s") % {'hosts': hosts})
        # weighted_host = WeightedHost() ... the best host for the job.
        weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
                                                            filter_properties)
        best_host = weighed_hosts[0]
        LOG.debug(_("Choosing for share: %(best_host)s")
                  % {'best_host': best_host})
        #NOTE(rushiagr): updating the available space parameters at same place
        best_host.obj.consume_from_volume(share_properties)
        return best_host

    def _populate_retry_share(self, filter_properties, properties):
        """Populate filter properties with history of retries for this
        request. If maximum retries is exceeded, raise NoValidHost.
        """
        max_attempts = self.max_attempts
        retry = filter_properties.pop('retry', {})

        if max_attempts == 1:
            # re-scheduling is disabled.
            return

        # retry is enabled, update attempt count:
        if retry:
            retry['num_attempts'] += 1
        else:
            retry = {
                'num_attempts': 1,
                'hosts': []  # list of share service hosts tried
            }
        filter_properties['retry'] = retry

        share_id = properties.get('share_id')
        self._log_share_error(share_id, retry)

        if retry['num_attempts'] > max_attempts:
            msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
                    "share %(share_id)s") % {'max_attempts': max_attempts,
                                             'share_id': share_id}
            raise exception.NoValidHost(reason=msg)

    def _log_share_error(self, share_id, retry):
        """If the request contained an exception from a previous share
        create operation, log it to aid debugging.
        """
        exc = retry.pop('exc', None)  # string-ified exception from share
        if not exc:
            return  # no exception info from a previous attempt, skip

        hosts = retry.get('hosts', None)
        if not hosts:
            return  # no previously attempted hosts, skip

        last_host = hosts[-1]
        msg = _("Error scheduling %(share_id)s from last share-service: "
                "%(last_host)s : %(exc)s") % {'share_id': share_id,
                                              'last_host': last_host,
                                              'exc': exc}
        LOG.error(msg)

    def populate_filter_properties_share(self, request_spec,
                                         filter_properties):
        """Stuff things into filter_properties.  Can be overridden in a
        subclass to add more data.
        """
        shr = request_spec['share_properties']
        filter_properties['size'] = shr['size']
        filter_properties['availability_zone'] = shr.get('availability_zone')
        filter_properties['user_id'] = shr.get('user_id')
        filter_properties['metadata'] = shr.get('metadata')
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py new file mode 100644 index 0000000000..1f5ec624e2 --- /dev/null +++ b/cinder/scheduler/filters/capacity_filter.py @@ -0,0 +1,57 @@ +# Copyright (c) 2012 Intel +# Copyright (c) 2012 OpenStack, LLC. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class CapacityFilter(filters.BaseHostFilter):
    """CapacityFilter filters based on volume host's capacity utilization."""

    def host_passes(self, host_state, filter_properties):
        """Return True if host has sufficient capacity."""
        requested = filter_properties.get('size')

        free_space = host_state.free_capacity_gb
        if free_space is None:
            # Fail Safe
            LOG.error(_("Free capacity not set: "
                        "volume node info collection broken."))
            return False

        if free_space in ('infinite', 'unknown'):
            # NOTE(zhiteng) for those back-ends cannot report actual
            # available capacity, we assume it is able to serve the
            # request.  Even if it was not, the retry mechanism is
            # able to handle the failure by rescheduling
            return True

        # Discount the configured reserved percentage before comparing.
        reserved = float(host_state.reserved_percentage) / 100
        free = math.floor(free_space * (1 - reserved))
        if free < requested:
            LOG.warning(_("Insufficient free space for volume creation "
                          "(requested / avail): "
                          "%(requested)s/%(available)s")
                        % {'requested': requested,
                           'available': free})

        return free >= requested
class RetryFilter(filters.BaseHostFilter):
    """Filter out nodes that have already been attempted for scheduling
    purposes
    """

    def host_passes(self, host_state, filter_properties):
        """Skip nodes that have already been attempted."""
        retry = filter_properties.get('retry', None)
        if not retry:
            # Re-scheduling is disabled, so every host trivially passes.
            # NOTE(review): wrapped in _() for consistency with the i18n
            # convention used by every other log message in this module.
            LOG.debug(_("Re-scheduling is disabled"))
            return True

        hosts = retry.get('hosts', [])
        host = host_state.host

        passes = host not in hosts
        pass_msg = "passes" if passes else "fails"

        LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: "
                    "%(hosts)s") % {'host': host,
                                    'pass_msg': pass_msg,
                                    'hosts': hosts})

        # Host passes if it's not in the list of previously attempted hosts:
        return passes
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Manage hosts in the current zone.
"""

import UserDict

from oslo.config import cfg

from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
from cinder.openstack.common.scheduler import weights
from cinder.openstack.common import timeutils
from cinder import utils

host_manager_opts = [
    cfg.ListOpt('scheduler_default_filters',
                default=[
                    'AvailabilityZoneFilter',
                    'CapacityFilter',
                    'CapabilitiesFilter'
                ],
                help='Which filter class names to use for filtering hosts '
                     'when not specified in the request.'),
    cfg.ListOpt('scheduler_default_weighers',
                default=[
                    'CapacityWeigher'
                ],
                help='Which weigher class names to use for weighing hosts.')
]

FLAGS = flags.FLAGS
FLAGS.register_opts(host_manager_opts)

LOG = logging.getLogger(__name__)


class ReadOnlyDict(UserDict.IterableUserDict):
    """A read-only dict.

    Blocks all mutating operations after construction; the content can
    only be swapped wholesale via update() (used by __init__).
    """
    def __init__(self, source=None):
        self.data = {}
        self.update(source)

    def __setitem__(self, key, item):
        raise TypeError

    def __delitem__(self, key):
        raise TypeError

    def clear(self):
        raise TypeError

    def pop(self, key, *args):
        raise TypeError

    def popitem(self):
        raise TypeError

    def update(self, source=None):
        # NOTE(review): when ``source`` is a plain dict this *aliases* it
        # rather than copying, so later mutations of the caller's dict
        # remain visible through this "read-only" view -- confirm whether
        # callers rely on that sharing before changing it.
        if source is None:
            return
        elif isinstance(source, UserDict.UserDict):
            self.data = source.data
        elif isinstance(source, type({})):
            self.data = source
        else:
            raise TypeError


class HostState(object):
    """Mutable and immutable information tracked for a host."""

    def __init__(self, host, capabilities=None, service=None):
        self.host = host
        self.update_capabilities(capabilities, service)

        # Backend identity as last reported by the driver.
        self.volume_backend_name = None
        self.share_backend_name = None
        self.vendor_name = None
        self.driver_version = 0
        self.storage_protocol = None
        self.QoS_support = False
        # Mutable available resources.
        # These will change as resources are virtually "consumed".
        self.total_capacity_gb = 0
        self.free_capacity_gb = None
        self.reserved_percentage = 0

        # Timestamp of the most recent capability report consumed.
        self.updated = None

    def update_capabilities(self, capabilities=None, service=None):
        # Read-only capability dicts

        if capabilities is None:
            capabilities = {}
        self.capabilities = ReadOnlyDict(capabilities)
        if service is None:
            service = {}
        self.service = ReadOnlyDict(service)

    def update_from_volume_capability(self, capability):
        """Update information about a host from its volume_node info."""
        if capability:
            # Ignore stale reports: only apply capabilities newer than the
            # last update we consumed.
            if self.updated and self.updated > capability['timestamp']:
                return

            # NOTE(review): this writes ``self.volume_backend`` while
            # __init__ declares ``self.volume_backend_name`` -- the two
            # attribute names never meet, so the value set in __init__ is
            # never refreshed.  Looks like a typo; confirm which name the
            # filters read before renaming.
            self.volume_backend = capability.get('volume_backend_name', None)
            self.vendor_name = capability.get('vendor_name', None)
            self.driver_version = capability.get('driver_version', None)
            self.storage_protocol = capability.get('storage_protocol', None)
            self.QoS_support = capability.get('QoS_support', False)

            self.total_capacity_gb = capability['total_capacity_gb']
            self.free_capacity_gb = capability['free_capacity_gb']
            self.reserved_percentage = capability['reserved_percentage']

            self.updated = capability['timestamp']

    def update_from_share_capability(self, capability):
        """Update information about a host from its share node info."""
        if capability:
            if self.updated and self.updated > capability['timestamp']:
                return

            # NOTE(review): same naming mismatch as above --
            # ``share_backend`` vs ``share_backend_name`` from __init__.
            self.share_backend = capability.get('share_backend_name', None)
            self.vendor_name = capability.get('vendor_name', None)
            self.driver_version = capability.get('driver_version', None)
            self.storage_protocol = capability.get('storage_protocol', None)
            self.QoS_support = capability.get('QoS_support', False)

            self.total_capacity_gb = capability['total_capacity_gb']
            self.free_capacity_gb = capability['free_capacity_gb']
            self.reserved_percentage = capability['reserved_percentage']

            self.updated = capability['timestamp']

    def consume_from_volume(self, volume):
        """Incrementally update host state from an volume."""
        volume_gb = volume['size']
        if self.free_capacity_gb == 'infinite':
            # There's virtually infinite space on back-end
            pass
        elif self.free_capacity_gb == 'unknown':
            # Unable to determine the actual free space on back-end
            pass
        else:
            self.free_capacity_gb -= volume_gb
        self.updated = timeutils.utcnow()

    def __repr__(self):
        return ("host '%s': free_capacity_gb: %s" %
                (self.host, self.free_capacity_gb))


class HostManager(object):
    """Base HostManager class."""

    host_state_cls = HostState

    def __init__(self):
        # Latest raw capability report per host: {host: {cap key: value}}
        self.service_states = {}
        self.host_state_map = {}
        self.filter_handler = filters.HostFilterHandler('cinder.scheduler.'
                                                        'filters')
        self.filter_classes = self.filter_handler.get_all_classes()
        self.weight_handler = weights.HostWeightHandler('cinder.scheduler.'
                                                        'weights')
        self.weight_classes = self.weight_handler.get_all_classes()

    def _choose_host_filters(self, filter_cls_names):
        """Since the caller may specify which filters to use we need
        to have an authoritative list of what is permissible. This
        function checks the filter names against a predefined set
        of acceptable filters.
        """
        if filter_cls_names is None:
            filter_cls_names = FLAGS.scheduler_default_filters
        if not isinstance(filter_cls_names, (list, tuple)):
            filter_cls_names = [filter_cls_names]
        good_filters = []
        bad_filters = []
        for filter_name in filter_cls_names:
            found_class = False
            for cls in self.filter_classes:
                if cls.__name__ == filter_name:
                    found_class = True
                    good_filters.append(cls)
                    break
            if not found_class:
                bad_filters.append(filter_name)
        if bad_filters:
            # Reject the whole request if any requested filter is unknown.
            msg = ", ".join(bad_filters)
            raise exception.SchedulerHostFilterNotFound(filter_name=msg)
        return good_filters

    def _choose_host_weighers(self, weight_cls_names):
        """Since the caller may specify which weighers to use, we need
        to have an authoritative list of what is permissible. This
        function checks the weigher names against a predefined set
        of acceptable weighers.
        """
        if weight_cls_names is None:
            weight_cls_names = FLAGS.scheduler_default_weighers
        if not isinstance(weight_cls_names, (list, tuple)):
            weight_cls_names = [weight_cls_names]

        good_weighers = []
        bad_weighers = []
        for weigher_name in weight_cls_names:
            found_class = False
            for cls in self.weight_classes:
                if cls.__name__ == weigher_name:
                    good_weighers.append(cls)
                    found_class = True
                    break
            if not found_class:
                bad_weighers.append(weigher_name)
        if bad_weighers:
            msg = ", ".join(bad_weighers)
            raise exception.SchedulerHostWeigherNotFound(weigher_name=msg)
        return good_weighers

    def get_filtered_hosts(self, hosts, filter_properties,
                           filter_class_names=None):
        """Filter hosts and return only ones passing all filters"""
        filter_classes = self._choose_host_filters(filter_class_names)
        return self.filter_handler.get_filtered_objects(filter_classes,
                                                        hosts,
                                                        filter_properties)

    def get_weighed_hosts(self, hosts, weight_properties,
                          weigher_class_names=None):
        """Weigh the hosts"""
        weigher_classes = self._choose_host_weighers(weigher_class_names)
        return self.weight_handler.get_weighed_objects(weigher_classes,
                                                       hosts,
                                                       weight_properties)

    def update_service_capabilities(self, service_name, host, capabilities):
        """Update the per-service capabilities based on this notification."""
        if service_name not in ('volume', 'share'):
            LOG.debug(_('Ignoring %(service_name)s service update '
                        'from %(host)s'), locals())
            return

        LOG.debug(_("Received %(service_name)s service update from "
                    "%(host)s.") % locals())

        # Copy the capabilities, so we don't modify the original dict
        capab_copy = dict(capabilities)
        capab_copy["timestamp"] = timeutils.utcnow()  # Reported time
        self.service_states[host] = capab_copy

    def get_all_host_states(self, context):
        """Return an iterator over the HostState objects this manager
        knows about (despite the historical docstring, the return value is
        ``dict.itervalues()``, not the dict itself).  Each of the
        consumable resources in HostState are pre-populated and adjusted
        based on data in the db.
        """

        # Get resource usage across the available volume nodes:
        topic = FLAGS.volume_topic
        volume_services = db.service_get_all_by_topic(context, topic)
        for service in volume_services:
            if not utils.service_is_up(service) or service['disabled']:
                LOG.warn(_("service is down or disabled."))
                continue
            host = service['host']
            capabilities = self.service_states.get(host, None)
            host_state = self.host_state_map.get(host)
            if host_state:
                # copy capabilities to host_state.capabilities
                host_state.update_capabilities(capabilities,
                                               dict(service.iteritems()))
            else:
                host_state = self.host_state_cls(host,
                                                 capabilities=capabilities,
                                                 service=
                                                 dict(service.iteritems()))
                self.host_state_map[host] = host_state
            # update host_state
            host_state.update_from_volume_capability(capabilities)

        return self.host_state_map.itervalues()

    def get_all_host_states_share(self, context):
        """Share-service counterpart of get_all_host_states: returns an
        iterator over HostState objects for share nodes, with consumable
        resources pre-populated and adjusted based on data in the db.
        """

        # Get resource usage across the available share nodes:
        topic = FLAGS.share_topic
        share_services = db.service_get_all_by_topic(context, topic)
        for service in share_services:
            if not utils.service_is_up(service) or service['disabled']:
                LOG.warn(_("service is down or disabled."))
                continue
            host = service['host']
            capabilities = self.service_states.get(host, None)
            host_state = self.host_state_map.get(host)
            if host_state:
                # copy capabilities to host_state.capabilities
                host_state.update_capabilities(capabilities,
                                               dict(service.iteritems()))
            else:
                host_state = self.host_state_cls(host,
                                                 capabilities=capabilities,
                                                 service=
                                                 dict(service.iteritems()))
                self.host_state_map[host] = host_state
            # update host_state
            host_state.update_from_share_capability(capabilities)

        return self.host_state_map.itervalues()
diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py
new file mode 100644
index 0000000000..f41fb0b3b7
--- /dev/null
+++ b/cinder/scheduler/manager.py
@@ -0,0 +1,178 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduler Service +""" + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder import manager +from cinder.openstack.common import excutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import api as notifier +from cinder.share import rpcapi as share_rpcapi +from cinder.volume import rpcapi as volume_rpcapi + +LOG = logging.getLogger(__name__) + +scheduler_driver_opt = cfg.StrOpt('scheduler_driver', + default='cinder.scheduler.filter_scheduler.' + 'FilterScheduler', + help='Default scheduler driver to use') + +FLAGS = flags.FLAGS +FLAGS.register_opt(scheduler_driver_opt) + + +class SchedulerManager(manager.Manager): + """Chooses a host to create volumes.""" + + RPC_API_VERSION = '1.3' + + def __init__(self, scheduler_driver=None, service_name=None, + *args, **kwargs): + if not scheduler_driver: + scheduler_driver = FLAGS.scheduler_driver + self.driver = importutils.import_object(scheduler_driver) + super(SchedulerManager, self).__init__(*args, **kwargs) + + def init_host(self): + ctxt = context.get_admin_context() + self.request_service_capabilities(ctxt) + + def get_host_list(self, context): + """Get a list of hosts from the HostManager.""" + return self.driver.get_host_list() + + def get_service_capabilities(self, context): + """Get the normalized set of capabilities for this zone.""" + return self.driver.get_service_capabilities() + + def update_service_capabilities(self, context, service_name=None, + host=None, capabilities=None, **kwargs): + """Process a capability update from a service node.""" + if capabilities is None: + capabilities = {} + self.driver.update_service_capabilities(service_name, + host, + capabilities) + + def create_volume(self, context, 
topic, volume_id, snapshot_id=None, + image_id=None, request_spec=None, + filter_properties=None): + try: + if request_spec is None: + # For RPC version < 1.2 backward compatibility + request_spec = {} + volume_ref = db.volume_get(context, volume_id) + size = volume_ref.get('size') + availability_zone = volume_ref.get('availability_zone') + volume_type_id = volume_ref.get('volume_type_id') + vol_type = db.volume_type_get(context, volume_type_id) + volume_properties = {'size': size, + 'availability_zone': availability_zone, + 'volume_type_id': volume_type_id} + request_spec.update( + {'volume_id': volume_id, + 'snapshot_id': snapshot_id, + 'image_id': image_id, + 'volume_properties': volume_properties, + 'volume_type': dict(vol_type).iteritems()}) + + self.driver.schedule_create_volume(context, request_spec, + filter_properties) + except exception.NoValidHost as ex: + volume_state = {'volume_state': {'status': 'error'}} + self._set_volume_state_and_notify('create_volume', + volume_state, + context, ex, request_spec) + except Exception as ex: + with excutils.save_and_reraise_exception(): + volume_state = {'volume_state': {'status': 'error'}} + self._set_volume_state_and_notify('create_volume', + volume_state, + context, ex, request_spec) + + def create_share(self, context, topic, share_id, snapshot_id=None, + request_spec=None, filter_properties=None): + try: + self.driver.schedule_create_share(context, request_spec, + filter_properties) + except exception.NoValidHost as ex: + self._set_share_error_state_and_notify('create_share', + context, ex, request_spec) + except Exception as ex: + with excutils.save_and_reraise_exception(): + self._set_share_error_state_and_notify('create_share', + context, ex, + request_spec) + + def _set_share_error_state_and_notify(self, method, context, ex, + request_spec): + LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals()) + + share_state = {'status': 'error'} + properties = request_spec.get('share_properties', {}) + + 
share_id = request_spec.get('share_id', None) + + if share_id: + db.share_update(context, share_id, share_state) + + payload = dict(request_spec=request_spec, + share_properties=properties, + share_id=share_id, + state=share_state, + method=method, + reason=ex) + + notifier.notify(context, notifier.publisher_id("scheduler"), + 'scheduler.' + method, notifier.ERROR, payload) + + def _set_volume_state_and_notify(self, method, updates, context, ex, + request_spec): + LOG.error(_("Failed to schedule_%(method)s: %(ex)s") % locals()) + + volume_state = updates['volume_state'] + properties = request_spec.get('volume_properties', {}) + + volume_id = request_spec.get('volume_id', None) + + if volume_id: + db.volume_update(context, volume_id, volume_state) + + payload = dict(request_spec=request_spec, + volume_properties=properties, + volume_id=volume_id, + state=volume_state, + method=method, + reason=ex) + + notifier.notify(context, notifier.publisher_id("scheduler"), + 'scheduler.' + method, notifier.ERROR, payload) + + def request_service_capabilities(self, context): + volume_rpcapi.VolumeAPI().publish_service_capabilities(context) + share_rpcapi.ShareAPI().publish_service_capabilities(context) diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py new file mode 100644 index 0000000000..4a9bc0a2db --- /dev/null +++ b/cinder/scheduler/rpcapi.py @@ -0,0 +1,79 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Client side of the scheduler manager RPC API.
"""

from cinder import flags
from cinder.openstack.common import jsonutils
import cinder.openstack.common.rpc.proxy


FLAGS = flags.FLAGS


class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
    '''Client side of the scheduler rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Add create_volume() method
        1.2 - Add request_spec, filter_properties arguments
              to create_volume()
        1.3 - Add create_share() method
    '''

    # Default version of the proxy; individual casts pass the minimum
    # version their message shape requires (1.2 / 1.3 below).
    RPC_API_VERSION = '1.0'

    def __init__(self):
        super(SchedulerAPI, self).__init__(
            topic=FLAGS.scheduler_topic,
            default_version=self.RPC_API_VERSION)

    def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
                      image_id=None, request_spec=None,
                      filter_properties=None):
        # request_spec may hold db rows; reduce to JSON-safe primitives
        # before putting it on the wire.
        request_spec_p = jsonutils.to_primitive(request_spec)
        return self.cast(ctxt, self.make_msg(
            'create_volume',
            topic=topic,
            volume_id=volume_id,
            snapshot_id=snapshot_id,
            image_id=image_id,
            request_spec=request_spec_p,
            filter_properties=filter_properties),
            version='1.2')

    def create_share(self, ctxt, topic, share_id, snapshot_id=None,
                     request_spec=None, filter_properties=None):
        request_spec_p = jsonutils.to_primitive(request_spec)
        return self.cast(ctxt, self.make_msg(
            'create_share',
            topic=topic,
            share_id=share_id,
            snapshot_id=snapshot_id,
            request_spec=request_spec_p,
            filter_properties=filter_properties),
            version='1.3')

    def update_service_capabilities(self, ctxt,
                                    service_name, host,
                                    capabilities):
        # Fan-out: every scheduler instance must learn the new capabilities.
        self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
                         service_name=service_name, host=host,
                         capabilities=capabilities))
diff --git a/cinder/scheduler/scheduler_options.py b/cinder/scheduler/scheduler_options.py
new file mode 100644
index 0000000000..9b05bb7178
--- /dev/null
+++ b/cinder/scheduler/scheduler_options.py
@@ -0,0 +1,105 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
SchedulerOptions monitors a local .json file for changes and loads
it if needed. This file is converted to a data structure and passed
into the filtering and weighing functions which can use it for
dynamic configuration.
"""

import datetime
import json
import os

from oslo.config import cfg

from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils

scheduler_json_config_location_opt = cfg.StrOpt(
    'scheduler_json_config_location',
    default='',
    help='Absolute path to scheduler configuration JSON file.')

FLAGS = flags.FLAGS
FLAGS.register_opt(scheduler_json_config_location_opt)

LOG = logging.getLogger(__name__)


class SchedulerOptions(object):
    """
    SchedulerOptions monitors a local .json file for changes and loads it
    if needed. This file is converted to a data structure and passed into
    the filtering and weighing functions which can use it for dynamic
    configuration.
    """

    def __init__(self):
        super(SchedulerOptions, self).__init__()
        self.data = {}            # last successfully loaded configuration
        self.last_modified = None  # mtime of the file at last load
        self.last_checked = None   # last time we polled (see NOTE below)

    def _get_file_handle(self, filename):
        """Get file handle.
        Broken out for testing."""
        return open(filename)

    def _get_file_timestamp(self, filename):
        """Get the last modified datetime. Broken out for testing."""
        try:
            return os.path.getmtime(filename)
        except os.error, e:
            LOG.exception(_("Could not stat scheduler options file "
                            "%(filename)s: '%(e)s'"), locals())
            raise

    def _load_file(self, handle):
        """Decode the JSON file. Broken out for testing."""
        try:
            return json.load(handle)
        except ValueError, e:
            # Malformed JSON falls back to an empty configuration.
            LOG.exception(_("Could not decode scheduler options: "
                            "'%(e)s'") % locals())
            return {}

    def _get_time_now(self):
        """Get current UTC. Broken out for testing."""
        return timeutils.utcnow()

    def get_configuration(self, filename=None):
        """Check the json file for changes and load it if needed."""
        if not filename:
            filename = FLAGS.scheduler_json_config_location
        if not filename:
            return self.data
        # NOTE(review): ``last_checked`` is read here but never assigned
        # anywhere in this class, so this 5-minute throttle can never
        # engage and the file is stat'ed on every call -- confirm whether
        # an assignment (self.last_checked = now) was dropped.
        if self.last_checked:
            now = self._get_time_now()
            if now - self.last_checked < datetime.timedelta(minutes=5):
                return self.data

        last_modified = self._get_file_timestamp(filename)
        if (not last_modified or not self.last_modified or
                last_modified > self.last_modified):
            self.data = self._load_file(self._get_file_handle(filename))
            self.last_modified = last_modified
            if not self.data:
                self.data = {}

        return self.data
diff --git a/cinder/scheduler/simple.py b/cinder/scheduler/simple.py
new file mode 100644
index 0000000000..c088a72f6f
--- /dev/null
+++ b/cinder/scheduler/simple.py
@@ -0,0 +1,137 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple Scheduler
"""

from oslo.config import cfg

from cinder import db
from cinder import exception
from cinder import flags
from cinder.scheduler import chance
from cinder.scheduler import driver
from cinder import utils

simple_scheduler_opts = [
    cfg.IntOpt("max_gigabytes",
               default=10000,
               help="maximum number of volume gigabytes to allow per host"), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(simple_scheduler_opts)


class SimpleScheduler(chance.ChanceScheduler):
    """Implements Naive Scheduler that tries to find least loaded host."""

    def schedule_create_volume(self, context, request_spec, filter_properties):
        """Picks a host that is up and has the fewest volumes."""
        elevated = context.elevated()

        volume_id = request_spec.get('volume_id')
        snapshot_id = request_spec.get('snapshot_id')
        image_id = request_spec.get('image_id')
        volume_properties = request_spec.get('volume_properties')
        volume_size = volume_properties.get('size')
        availability_zone = volume_properties.get('availability_zone')

        zone, host = None, None
        if availability_zone:
            # An AZ of the form "<zone>:<host>" pins the volume to a host
            # (admin only).
            zone, _x, host = availability_zone.partition(':')
        if host and context.is_admin:
            topic = FLAGS.volume_topic
            service = db.service_get_by_args(elevated, host, topic)
            if not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            updated_volume = driver.volume_update_db(context, volume_id, host)
            self.volume_rpcapi.create_volume(context,
                                             updated_volume,
                                             host,
                                             snapshot_id,
                                             image_id)
            return None

        # Services come back sorted by allocated gigabytes, least first.
        results = db.service_get_all_volume_sorted(elevated)
        if zone:
            results = [(service, gigs) for (service, gigs) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_size > FLAGS.max_gigabytes:
                msg = _("Not enough allocatable volume gigabytes remaining")
                raise exception.NoValidHost(reason=msg)
            if utils.service_is_up(service) and not service['disabled']:
                updated_volume = driver.volume_update_db(context, volume_id,
                                                         service['host'])
                self.volume_rpcapi.create_volume(context,
                                                 updated_volume,
                                                 service['host'],
                                                 snapshot_id,
                                                 image_id)
                return None
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)

    def schedule_create_share(self, context, request_spec, filter_properties):
        """Picks a host that is up and has the fewest shares."""
        #TODO(rushiagr) - pick only hosts that run shares
        elevated = context.elevated()

        share_id = request_spec.get('share_id')
        snapshot_id = request_spec.get('snapshot_id')
        share_properties = request_spec.get('share_properties')
        share_size = share_properties.get('size')
        availability_zone = share_properties.get('availability_zone')

        zone, host = None, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')
        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, FLAGS.share_topic)
            if not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            updated_share = driver.share_update_db(context, share_id, host)
            self.share_rpcapi.create_share(context,
                                           updated_share,
                                           host,
                                           snapshot_id,
                                           None)
            return None

        results = db.service_get_all_share_sorted(elevated)
        if zone:
            results = [(service, gigs) for (service, gigs) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, share_gigabytes) = result
            if share_gigabytes + share_size > FLAGS.max_gigabytes:
                msg = _("Not enough allocatable share gigabytes remaining")
                raise exception.NoValidHost(reason=msg)
            if utils.service_is_up(service) and not service['disabled']:
                updated_share = driver.share_update_db(context, share_id,
                                                       service['host'])
                self.share_rpcapi.create_share(context,
                                               updated_share,
                                               service['host'],
                                               snapshot_id, None)
                return None
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
diff --git a/cinder/scheduler/weights/__init__.py b/cinder/scheduler/weights/__init__.py
new file mode 100644
index 0000000000..ce4951de97
--- /dev/null
+++ b/cinder/scheduler/weights/__init__.py
@@ -0,0 +1,14 @@
# Copyright (c) 2013 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
diff --git a/cinder/scheduler/weights/capacity.py b/cinder/scheduler/weights/capacity.py
new file mode 100644
index 0000000000..e2042e9cd7
--- /dev/null
+++ b/cinder/scheduler/weights/capacity.py
@@ -0,0 +1,56 @@
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Capacity Weigher.  Weigh hosts by their available capacity.

The default is to spread volumes across all hosts evenly.  If you prefer
stacking, you can set the 'capacity_weight_multiplier' option to a negative
number and the weighing has the opposite effect of the default.
"""

import math

from oslo.config import cfg

from cinder import flags
from cinder.openstack.common.scheduler import weights

capacity_weight_opts = [
    cfg.FloatOpt('capacity_weight_multiplier',
                 default=1.0,
                 help='Multiplier used for weighing volume capacity. '
                      'Negative numbers mean to stack vs spread.'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(capacity_weight_opts)


class CapacityWeigher(weights.BaseHostWeigher):
    """Weigher that scores hosts by usable free capacity (GB)."""

    def _weight_multiplier(self):
        """Override the weight multiplier."""
        return FLAGS.capacity_weight_multiplier

    def _weigh_object(self, host_state, weight_properties):
        """Higher weights win.  We want spreading to be the default."""
        # Usable free space = reported free space minus the reserved
        # percentage, rounded down to whole gigabytes (same formula as
        # CapacityFilter).
        reserved = float(host_state.reserved_percentage) / 100
        free_space = host_state.free_capacity_gb
        if free_space == 'infinite' or free_space == 'unknown':
            # (zhiteng) 'infinite' and 'unknown' are treated the same
            # here, for sorting purpose.
            free = float('inf')
        else:
            free = math.floor(host_state.free_capacity_gb * (1 - reserved))
        return free
diff --git a/cinder/service.py b/cinder/service.py
new file mode 100644
index 0000000000..4e7c549770
--- /dev/null
+++ b/cinder/service.py
@@ -0,0 +1,622 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
"""Generic Node base class for all workers that run on hosts."""

import errno
import inspect
import os
import random
import signal
import sys
import time

import eventlet
import greenlet
from oslo.config import cfg

from cinder import context
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
from cinder import utils
from cinder import version
from cinder import wsgi

LOG = logging.getLogger(__name__)

service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='seconds between nodes reporting state to datastore'),
    cfg.IntOpt('periodic_interval',
               default=60,
               help='seconds between running periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='range of seconds to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.StrOpt('osapi_volume_listen',
               default="0.0.0.0",
               help='IP address for OpenStack Volume API to listen'),
    cfg.IntOpt('osapi_volume_listen_port',
               default=8776,
               help='port for os volume api to listen'), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(service_opts)


class SignalExit(SystemExit):
    """SystemExit subclass that remembers which signal triggered it."""

    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self._services = []

    @staticmethod
    def run_server(server):
        """Start and wait for a server to finish.

        :param server: Server to run and wait for.
        :returns: None

        """
        server.start()
        server.wait()

    def launch_server(self, server):
        """Load and start the given server in a green thread.

        :param server: The server you would like to start.
        :returns: None

        """
        gt = eventlet.spawn(self.run_server, server)
        self._services.append(gt)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        for service in self._services:
            service.kill()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        def sigterm(sig, frame):
            LOG.audit(_("SIGTERM received"))
            # NOTE(jk0): Raise a ^C which is caught by the caller and cleanly
            # shuts down the service. This does not yet handle eventlet
            # threads.
            raise KeyboardInterrupt

        signal.signal(signal.SIGTERM, sigterm)

        for service in self._services:
            try:
                service.wait()
            except greenlet.GreenletExit:
                pass


class ServerWrapper(object):
    """Bookkeeping record pairing a server with its worker children."""

    def __init__(self, server, workers):
        self.server = server
        self.workers = workers      # desired number of child processes
        self.children = set()       # pids of live children
        self.forktimes = []         # timestamps used to rate-limit forking
        self.failed = False         # set when a child exits with status 2


class ProcessLauncher(object):
    """Forks and supervises worker processes, respawning on death."""

    def __init__(self):
        self.children = {}
        self.sigcaught = None
        self.totalwrap = 0
        self.failedwrap = 0
        self.running = True
        # Children inherit the write end; when the parent dies the read
        # end unblocks and children can exit (see _pipe_watcher).
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

    def _handle_signal(self, signo, frame):
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process(self, server):
        """Run *server* in a freshly-forked child; never returns normally."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, _sigterm)
        # Block SIGINT and let the parent send us a SIGTERM
        # signal.signal(signal.SIGINT, signal.SIG_IGN)
        # This differs from the behavior in nova in that we dont ignore this
        # It allows the non-wsgi services to be terminated properly
        signal.signal(signal.SIGINT, _sigterm)

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn(self._pipe_watcher)

        # Reseed random number generator (forked children share parent state)
        random.seed()

        launcher = Launcher()
        launcher.run_server(server)

    def _start_child(self, wrap):
        """Fork one worker for *wrap*; returns the child pid (in parent)."""
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            status = 0
            try:
                self._child_process(wrap.server)
            except SignalExit as exc:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT'}[exc.signo]
                LOG.info(_('Caught %s, exiting'), signame)
                status = exc.code
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                LOG.exception(_('Unhandled exception'))
                status = 2
            finally:
                wrap.server.stop()

            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_server(self, server, workers=1):
        """Register *server* and fork *workers* child processes for it."""
        wrap = ServerWrapper(server, workers)
        self.totalwrap = self.totalwrap + 1
        LOG.info(_('Starting %d workers'), wrap.workers)
        while (self.running and len(wrap.children) < wrap.workers
               and not wrap.failed):
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one dead child, if any; returns its ServerWrapper or None."""
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        code = 0
        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals())
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)d exited with status %(code)d'), locals())

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        # Exit status 2 means the child hit an unhandled exception; when
        # every wrapper has failed this way, stop respawning entirely.
        if 2 == code:
            wrap.failed = True
            self.failedwrap = self.failedwrap + 1
            LOG.info(_('_wait_child %d'), self.failedwrap)
            if self.failedwrap == self.totalwrap:
                self.running = False
        return wrap

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            LOG.info(_('wait wrap.failed %s'), wrap.failed)
            while (self.running and len(wrap.children) < wrap.workers
                   and not wrap.failed):
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()


class Service(object):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports
    it state to the database services table."""

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, *args, **kwargs):
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        super(Service, self).__init__(*args, **kwargs)
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

    def start(self):
        """Register with the DB, wire up RPC consumers and periodic timers."""
        version_string = version.version_string()
        LOG.audit(_('Starting %(topic)s node (version %(version_string)s)'),
                  {'topic': self.topic, 'version_string': version_string})
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        rpc_dispatcher = self.manager.create_rpc_dispatcher()

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        if self.report_interval:
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)

    def _create_service_ref(self, context):
        """Insert this service's row into the services table."""
        zone = FLAGS.storage_availability_zone
        service_ref = db.service_create(context,
                                        {'host': self.host,
                                         'binary': self.binary,
                                         'topic': self.topic,
                                         'report_count': 0,
                                         'availability_zone': zone})
        self.service_id = service_ref['id']

    def __getattr__(self, key):
        # Delegate unknown attributes to the manager so the service proxies
        # its manager's API.
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None, service_name=None):
        """Instantiates class and passes back application object.

        :param host: defaults to FLAGS.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'cinder-' part
        :param manager: defaults to FLAGS.<topic>_manager
        :param report_interval: defaults to FLAGS.report_interval
        :param periodic_interval: defaults to FLAGS.periodic_interval
        :param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay

        """
        if not host:
            host = FLAGS.host
        if not binary:
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary
        if not manager:
            subtopic = topic.rpartition('cinder-')[2]
            manager = FLAGS.get('%s_manager' % subtopic, None)
        if report_interval is None:
            report_interval = FLAGS.report_interval
        if periodic_interval is None:
            periodic_interval = FLAGS.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay,
                          service_name=service_name)

        return service_obj

    def kill(self):
        """Destroy the service object in the datastore."""
        self.stop()
        try:
            db.service_destroy(context.get_admin_context(), self.service_id)
        except exception.NotFound:
            LOG.warn(_('Service killed that has no database entry'))

    def stop(self):
        """Stop RPC consumers and all timers, ignoring shutdown errors."""
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                pass
        self.timers = []

    def wait(self):
        """Block until every timer greenthread has finished."""
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                pass

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)

    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = FLAGS.storage_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                LOG.debug(_('The service database object disappeared, '
                            'Recreating it.'))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone

            db.service_update(ctxt,
                              self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_('model server went away'))


class WSGIService(object):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None

        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port)

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.

        """
        fl = '%s_manager' % self.name
        if fl not in FLAGS:
            return None

        manager_class_name = FLAGS.get(fl, None)
        if not manager_class_name:
            return None

        manager_class = importutils.import_class(manager_class_name)
        return manager_class()

    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None

        """
        if self.manager:
            self.manager.init_host()
        self.server.start()
        self.port = self.server.port

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None

        """
        self.server.wait()


# NOTE(vish): the global launcher is to maintain the existing
#             functionality of calling service.serve +
#             service.wait
_launcher = None


def serve(*servers):
    """Launch *servers* on the shared module-level Launcher."""
    global _launcher
    if not _launcher:
        _launcher = Launcher()
    for server in servers:
        _launcher.launch_server(server)


def wait():
    """Log the flag set and block until all launched services stop."""
    LOG.debug(_('Full set of FLAGS:'))
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        # NOTE: guard against flag_get being None -- 'in None' raises
        # TypeError.
        if ("_password" in flag or "_key" in flag or
                (flag == "sql_connection" and flag_get is not None and
                 "mysql:" in flag_get)):
            LOG.debug(_('%(flag)s : FLAG SET ') % locals())
        else:
            LOG.debug('%(flag)s : %(flag_get)s' % locals())
    # NOTE: _launcher is None when wait() is called before serve();
    # the original code raised AttributeError here.
    try:
        if _launcher:
            _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()
    rpc.cleanup()
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from cinder.share import ' elsewhere.
import cinder.flags
import cinder.openstack.common.importutils

# The concrete share API implementation is selected by the
# 'share_api_class' flag and imported once at module load time.
API = cinder.openstack.common.importutils.import_class(
    cinder.flags.FLAGS.share_api_class)


"""
Handles all requests relating to shares.
"""

import functools

from cinder.db import base
from cinder import exception
from cinder import flags
from cinder.image import glance
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
from cinder.openstack.common import timeutils
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.share import rpcapi as share_rpcapi

from oslo.config import cfg


FLAGS = flags.FLAGS

LOG = logging.getLogger(__name__)
GB = 1048576 * 1024


def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution.

    This decorator requires the first 3 args of the wrapped function
    to be (self, context, share).
    """
    @functools.wraps(func)
    def wrapped(self, context, target_obj, *args, **kwargs):
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)

    return wrapped


def check_policy(context, action, target_obj=None):
    """Enforce the 'share:<action>' policy for the given target.

    :raises: cinder.exception.PolicyNotAuthorized if denied.
    """
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    target.update(target_obj or {})
    _action = 'share:%s' % action
    cinder.policy.enforce(context, _action, target)


class API(base.Base):
    """API for interacting with the share manager."""

    def __init__(self, db_driver=None):
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.share_rpcapi = share_rpcapi.ShareAPI()
        super(API, self).__init__(db_driver)

    def create(self, context, share_proto, size, name, description,
               snapshot=None, availability_zone=None):
        """Create new share.

        :param share_proto: 'nfs' or 'cifs' (case-insensitive)
        :param size: share size in GB; may be a stringified int
        :param snapshot: optional snapshot dict to create the share from
        :returns: the created share DB record
        :raises: InvalidShareSnapshot, InvalidInput
        """
        if snapshot is not None:
            if snapshot['status'] != 'available':
                msg = _('status must be available')
                raise exception.InvalidShareSnapshot(reason=msg)
            if not size:
                # Inherit the size from the source snapshot when omitted.
                size = snapshot['share_size']

            snapshot_id = snapshot['id']
        else:
            snapshot_id = None

        def as_int(s):
            try:
                return int(s)
            except (ValueError, TypeError):
                return s

        # tolerate size as stringified int
        size = as_int(size)

        if not isinstance(size, int) or size <= 0:
            msg = (_("Share size '%s' must be an integer and greater than 0")
                   % size)
            raise exception.InvalidInput(reason=msg)

        #TODO(rushiagr): Find a suitable place to keep all the allowed
        #                share types so that it becomes easier to add one
        if share_proto.lower() not in ['nfs', 'cifs']:
            msg = (_("Invalid share type provided: %s") % share_proto)
            raise exception.InvalidInput(reason=msg)

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        options = {'size': size,
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'snapshot_id': snapshot_id,
                   'availability_zone': availability_zone,
                   'status': "creating",
                   'scheduled_at': timeutils.utcnow(),
                   'display_name': name,
                   'display_description': description,
                   'share_proto': share_proto,
                   }

        share = self.db.share_create(context, options)

        request_spec = {'share_properties': options,
                        'share_proto': share_proto,
                        'share_id': share['id'],
                        'snapshot_id': share['snapshot_id'],
                        }

        filter_properties = {}

        self.scheduler_rpcapi.create_share(
            context,
            FLAGS.share_topic,
            share['id'],
            snapshot_id,
            request_spec=request_spec,
            filter_properties=filter_properties)

        return share

    def delete(self, context, share):
        """Delete share.

        :raises: InvalidShare if the share is in a non-deletable state,
                 InvalidVolume if snapshots still depend on it.
        """
        share_id = share['id']
        if not share['host']:
            # NOTE(rushiagr): scheduling failed, delete
            self.db.share_delete(context, share_id)
            return

        if share['status'] not in ["available", "error"]:
            msg = _("Share status must be available or error")
            raise exception.InvalidShare(reason=msg)

        snapshots = self.db.share_snapshot_get_all_for_share(context, share_id)
        if len(snapshots):
            msg = _("Share still has %d dependent snapshots") % len(snapshots)
            # NOTE(review): raising InvalidVolume (not InvalidShare) is
            # kept for backward compatibility with existing callers.
            raise exception.InvalidVolume(reason=msg)

        now = timeutils.utcnow()
        share = self.db.share_update(context, share_id, {'status': 'deleting',
                                                         'terminated_at': now})

        self.share_rpcapi.delete_share(context, share)

    def create_snapshot(self, context, share, name, description,
                        force=False):
        """Create a snapshot of *share*; with force=True ignore its state."""
        check_policy(context, 'create_snapshot', share)

        if ((not force) and (share['status'] != "available")):
            msg = _("must be available")
            raise exception.InvalidShare(reason=msg)

        options = {'share_id': share['id'],
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'status': "creating",
                   'progress': '0%',
                   'share_size': share['size'],
                   'display_name': name,
                   'display_description': description,
                   'share_proto': share['share_proto'],
                   'export_location': share['export_location']}

        snapshot = self.db.share_snapshot_create(context, options)
        self.share_rpcapi.create_snapshot(context, share, snapshot)
        return snapshot

    @wrap_check_policy
    def delete_snapshot(self, context, snapshot, force=False):
        """Mark a snapshot as deleting and notify the share manager."""
        if not force and snapshot['status'] not in ["available", "error"]:
            msg = _("Share Snapshot status must be available or error")
            raise exception.InvalidShareSnapshot(reason=msg)

        self.db.share_snapshot_update(context, snapshot['id'],
                                      {'status': 'deleting'})
        share = self.db.share_get(context, snapshot['share_id'])
        self.share_rpcapi.delete_snapshot(context, snapshot, share['host'])

    @wrap_check_policy
    def update(self, context, share, fields):
        """Update the DB record of *share* with *fields*."""
        self.db.share_update(context, share['id'], fields)

    def get(self, context, share_id):
        """Return a single share, policy-checked against the record."""
        rv = self.db.share_get(context, share_id)
        check_policy(context, 'get', rv)
        return rv

    def get_all(self, context, search_opts=None):
        """Return shares visible to the caller, filtered by *search_opts*.

        Admins passing 'all_tenants' see every project's shares.
        """
        # NOTE: default changed from a shared mutable {} to None
        # (consistent with get_all_snapshots); behavior is unchanged.
        check_policy(context, 'get_all')

        search_opts = search_opts or {}

        if (context.is_admin and 'all_tenants' in search_opts):
            # Need to remove all_tenants to pass the filtering below.
            del search_opts['all_tenants']
            shares = self.db.share_get_all(context)
        else:
            shares = self.db.share_get_all_by_project(context,
                                                      context.project_id)

        if search_opts:
            LOG.debug(_("Searching by: %s") % str(search_opts))

            results = []
            not_found = object()
            for share in shares:
                for opt, value in search_opts.iteritems():
                    if share.get(opt, not_found) != value:
                        break
                else:
                    results.append(share)
            shares = results
        return shares

    def get_snapshot(self, context, snapshot_id):
        """Return one snapshot record as a plain dict."""
        check_policy(context, 'get_snapshot')
        rv = self.db.share_snapshot_get(context, snapshot_id)
        return dict(rv.iteritems())

    def get_all_snapshots(self, context, search_opts=None):
        """Return snapshots visible to the caller, filtered like get_all."""
        check_policy(context, 'get_all_snapshots')

        search_opts = search_opts or {}

        if (context.is_admin and 'all_tenants' in search_opts):
            # Need to remove all_tenants to pass the filtering below.
            del search_opts['all_tenants']
            snapshots = self.db.share_snapshot_get_all(context)
        else:
            snapshots = self.db.share_snapshot_get_all_by_project(
                context, context.project_id)

        if search_opts:
            LOG.debug(_("Searching by: %s") % str(search_opts))

            results = []
            not_found = object()
            for snapshot in snapshots:
                for opt, value in search_opts.iteritems():
                    if snapshot.get(opt, not_found) != value:
                        break
                else:
                    results.append(snapshot)
            snapshots = results
        return snapshots

    def allow_access(self, ctx, share, access_type, access_to):
        """Allow access to share."""
        if not share['host']:
            msg = _("Share host is None")
            raise exception.InvalidShare(reason=msg)
        if share['status'] not in ["available"]:
            msg = _("Share status must be available")
            raise exception.InvalidShare(reason=msg)
        check_policy(ctx, 'allow_access')
        values = {'share_id': share['id'],
                  'access_type': access_type,
                  'access_to': access_to}
        access = self.db.share_access_create(ctx, values)
        self.share_rpcapi.allow_access(ctx, share, access)
        return access

    def deny_access(self, ctx, share, access):
        """Deny access to share."""
        check_policy(ctx, 'deny_access')
        #First check state of the target share
        if not share['host']:
            msg = _("Share host is None")
            raise exception.InvalidShare(reason=msg)
        if share['status'] not in ["available"]:
            msg = _("Share status must be available")
            raise exception.InvalidShare(reason=msg)

        #Then check state of the access rule
        if access['state'] == access.STATE_ERROR:
            self.db.share_access_delete(ctx, access["id"])
        elif access['state'] == access.STATE_ACTIVE:
            self.db.share_access_update(ctx, access["id"],
                                        {'state': access.STATE_DELETING})
            self.share_rpcapi.deny_access(ctx, share, access)
        else:
            msg = _("Access policy should be active or in error state")
            raise exception.InvalidShareAccess(reason=msg)
        #update share state and send message to manager

    def access_get_all(self, context, share):
        """Returns all access rules for share."""
        check_policy(context, 'access_get_all')
        rules = self.db.share_access_get_all_for_share(context, share['id'])
        return [{'id': rule.id,
                 'access_type': rule.access_type,
                 'access_to': rule.access_to,
                 'state': rule.state} for rule in rules]

    def access_get(self, context, access_id):
        """Returns access rule with the id."""
        check_policy(context, 'access_get')
        rule = self.db.share_access_get(context, access_id)
        return rule
"""
Configuration support for all drivers.

This module allows support for setting configurations either from default
or from a particular FLAGS group, to be able to set multiple configurations
for a given set of values.

For instance, two lvm configurations can be set by naming them in groups as

    [lvm1]
    volume_group=lvm-group-1
    ...

    [lvm2]
    volume_group=lvm-group-2
    ...

And the configuration group name will be passed in so that all calls to
configuration.volume_group within that instance will be mapped to the proper
named group.

This class also ensures the implementation's configuration is grafted into
the option group.  This is due to the way cfg works: all cfg options must be
defined and registered in the group in which they are used.
"""

from oslo.config import cfg

from cinder import flags
from cinder.openstack.common import log as logging


FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)


class Configuration(object):
    """Attribute proxy that resolves options against a named FLAGS group.

    Option reads on an instance are delegated to the chosen group (or to
    the global FLAGS when no group is given), after the driver's options
    have been registered there.
    """

    def __init__(self, share_opts, config_group=None):
        """Graft the implementation's config values into *config_group*."""
        self.config_group = config_group
        # Pick which conf object attribute lookups will be routed to.
        if self.config_group:
            self._ensure_config_values(share_opts)
            self.local_conf = FLAGS._get(self.config_group)
        else:
            self.local_conf = FLAGS

    def _ensure_config_values(self, share_opts):
        # Options must be registered inside the group before they can be
        # read from it.
        FLAGS.register_opts(share_opts, group=self.config_group)

    def append_config_values(self, share_opts):
        """Register additional driver options into this group."""
        self._ensure_config_values(share_opts)

    def safe_get(self, value):
        """Like attribute access, but None instead of NoSuchOptError."""
        try:
            return self.__getattr__(value)
        except cfg.NoSuchOptError:
            return None

    def __getattr__(self, value):
        # Delegate every unknown attribute to the underlying conf object.
        return getattr(self.local_conf, value)
"""Drivers for shares."""

import ConfigParser
import os
import re
import time

from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.share.configuration import Configuration
from cinder import utils

from oslo.config import cfg


LOG = logging.getLogger(__name__)

share_opts = [
    #NOTE(rushiagr): Reasonable to define this option at only one place.
    cfg.IntOpt('num_shell_tries',
               default=3,
               help='number of times to attempt to run flakey shell commands'),
    cfg.IntOpt('reserved_share_percentage',
               default=0,
               help='The percentage of backend capacity reserved'),
    cfg.StrOpt('share_backend_name',
               default=None,
               help='The backend name for a given driver implementation'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(share_opts)


#TODO(rushiagr): keep the configuration option in only one class and not two
#NOTE(rushiagr): The right place for this class is cinder.driver or
#                cinder.utils.
class ExecuteMixin(object):
    """Provides an executable functionality to a driver class."""

    def __init__(self, *args, **kwargs):
        # 'configuration' is optional: a Configuration object wrapping the
        # backend's option group, or None to fall back to global FLAGS.
        self.db = None
        self.configuration = kwargs.get('configuration', None)
        if self.configuration:
            self.configuration.append_config_values(share_opts)
        self.set_execute(kwargs.pop('execute', utils.execute))

    def set_execute(self, execute):
        """Install the callable used to run shell commands."""
        self._execute = execute

    def _try_execute(self, *command, **kwargs):
        """Run a shell command, retrying with back-off on failure.

        Returns True on success; re-raises ProcessExecutionError once the
        configured number of attempts is exhausted.
        """
        # NOTE(vish): Volume commands can partially fail due to timing, but
        #             running them a second time on failure will usually
        #             recover nicely.
        # Fall back to the globally registered option when no per-backend
        # configuration object was supplied; previously this line raised
        # AttributeError on None, even though __init__ allows it.
        if self.configuration:
            max_tries = self.configuration.num_shell_tries
        else:
            max_tries = FLAGS.num_shell_tries
        tries = 0
        while True:
            try:
                self._execute(*command, **kwargs)
                return True
            except exception.ProcessExecutionError:
                tries = tries + 1
                if tries >= max_tries:
                    raise
                LOG.exception(_("Recovering from a failed execute. "
                                "Try number %s"), tries)
                # Quadratic back-off between attempts.
                time.sleep(tries ** 2)


class ShareDriver(object):
    """Class defines interface of NAS driver."""

    def __init__(self, *args, **kwargs):
        super(ShareDriver, self).__init__()
        self.configuration = kwargs.get('configuration', None)
        if self.configuration:
            self.configuration.append_config_values(share_opts)

    def allocate_container(self, context, share):
        """Is called to allocate container for share."""
        raise NotImplementedError()

    def allocate_container_from_snapshot(self, context, share, snapshot):
        """Is called to create share from snapshot."""
        raise NotImplementedError()

    def deallocate_container(self, context, share):
        """Is called to deallocate container of share."""
        raise NotImplementedError()

    def create_share(self, context, share):
        """Is called to create share."""
        raise NotImplementedError()

    def create_snapshot(self, context, snapshot):
        """Is called to create snapshot."""
        raise NotImplementedError()

    def delete_share(self, context, share):
        """Is called to remove share."""
        raise NotImplementedError()

    def delete_snapshot(self, context, snapshot):
        """Is called to remove snapshot."""
        raise NotImplementedError()

    def create_export(self, context, share):
        """Is called to export share."""
        raise NotImplementedError()

    def remove_export(self, context, share):
        """Is called to stop exporting share."""
        raise NotImplementedError()

    def ensure_share(self, context, share):
        """Invoked to ensure that share is exported."""
        raise NotImplementedError()

    def allow_access(self, context, share, access):
        """Allow access to the share."""
        raise NotImplementedError()

    def deny_access(self, context, share, access):
        """Deny access to the share."""
        raise NotImplementedError()

    def check_for_setup_error(self):
        """Check for setup error."""
        pass

    def do_setup(self, context):
        """Any initialization the share driver does while starting."""
        pass

    def get_share_stats(self, refresh=False):
        """Get share status.

        If 'refresh' is True, run update the stats first."""
        if refresh:
            self._update_share_status()

        return self._stats

    def _update_share_status(self):
        """Retrieve status info from share group."""

        LOG.debug(_("Updating share status"))
        data = {}
        # NOTE(review): assumes self.configuration is set; drivers created
        # without a configuration object would crash here — TODO confirm.
        backend_name = self.configuration.safe_get('share_backend_name')
        data["share_backend_name"] = backend_name or 'Generic_NFS'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = None

        data['total_capacity_gb'] = 'infinite'
        data['free_capacity_gb'] = 'infinite'
        data['reserved_percentage'] = 0
        data['QoS_support'] = False
        self._stats = data
"""
LVM Driver for shares.

"""

import ConfigParser
import math
import os
import re

from cinder import exception
from cinder import flags
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.share import driver
from cinder import utils

from oslo.config import cfg


LOG = logging.getLogger(__name__)

share_opts = [
    cfg.StrOpt('share_export_root',
               default='$state_path/mnt',
               help='Base folder where exported shares are located'),
    cfg.StrOpt('share_export_ip',
               default=None,
               help='IP to be added to export string'),
    cfg.StrOpt('smb_config_path',
               default='$state_path/smb.conf'),
    cfg.IntOpt('share_lvm_mirrors',
               default=0,
               help='If set, create lvms with multiple mirrors. Note that '
                    'this requires lvm_mirrors + 2 pvs with available space'),
    cfg.StrOpt('share_volume_group',
               default='cinder-shares',
               help='Name for the VG that will contain exported shares'),
    cfg.ListOpt('share_lvm_helpers',
                default=[
                    'CIFS=cinder.share.drivers.lvm.CIFSNetConfHelper',
                    'NFS=cinder.share.drivers.lvm.NFSHelper',
                ],
                help='Specify list of share export helpers.'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(share_opts)


class LVMShareDriver(driver.ExecuteMixin, driver.ShareDriver):
    """Executes commands relating to Shares."""

    def __init__(self, db, *args, **kwargs):
        """Do initialization.

        :param db: reference to the database API used to look up shares.
        """
        super(LVMShareDriver, self).__init__(*args, **kwargs)
        self.db = db
        self._helpers = None
        self.configuration.append_config_values(share_opts)

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        out, err = self._execute('vgs', '--noheadings', '-o', 'name',
                                 run_as_root=True)
        volume_groups = out.split()
        if self.configuration.share_volume_group not in volume_groups:
            msg = (_("share volume group %s doesn't exist")
                   % self.configuration.share_volume_group)
            raise exception.InvalidParameterValue(err=msg)
        if not self.configuration.share_export_ip:
            # Message grammar fixed (was "share_export_ip doesn't specified").
            msg = (_("share_export_ip is not specified"))
            raise exception.InvalidParameterValue(err=msg)

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(LVMShareDriver, self).do_setup(context)
        self._setup_helpers()
        for helper in self._helpers.values():
            helper.init()

    def _setup_helpers(self):
        """Initializes protocol-specific NAS drivers."""
        self._helpers = {}
        for helper_str in self.configuration.share_lvm_helpers:
            # Each entry looks like 'PROTO=import.path.to.HelperClass'.
            share_proto, _, import_str = helper_str.partition('=')
            helper = importutils.import_class(import_str)
            #TODO(rushiagr): better way to handle configuration
            #                instead of just passing to the helper
            self._helpers[share_proto.upper()] = helper(self._execute,
                                                        self.configuration)

    def _local_path(self, share):
        # NOTE(vish): stops deprecation warning
        # dm names escape every '-' in VG/LV names as '--'.
        escaped_group = \
            self.configuration.share_volume_group.replace('-', '--')
        escaped_name = share['name'].replace('-', '--')
        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)

    def _allocate_container(self, share_name, sizestr):
        """Create the backing LV; sizestr is like '10G'."""
        cmd = ['lvcreate', '-L', sizestr, '-n', share_name,
               self.configuration.share_volume_group]
        if self.configuration.share_lvm_mirrors:
            cmd += ['-m', self.configuration.share_lvm_mirrors, '--nosync']
            terras = int(sizestr[:-1]) / 1024.0
            if terras >= 1.5:
                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
                # NOTE(vish): Next power of two for region size. See:
                #             http://red.ht/U2BPOD
                cmd += ['-R', str(rsize)]

        self._try_execute(*cmd, run_as_root=True)

    def _deallocate_container(self, share_name):
        """Deletes a logical volume for share."""
        # zero out old volumes to prevent data leaking between users
        # TODO(ja): reclaiming space should be done lazy and low priority
        self._try_execute('lvremove', '-f', "%s/%s" %
                          (self.configuration.share_volume_group,
                           share_name),
                          run_as_root=True)

    def get_share_stats(self, refresh=False):
        """Get share status.

        If 'refresh' is True, run update the stats first."""
        if refresh:
            self._update_share_status()

        return self._stats

    def _update_share_status(self):
        """Retrieve status info from share volume group."""

        LOG.debug(_("Updating share status"))
        data = {}

        # Note(zhiteng): These information are driver/backend specific,
        # each driver may define these values in its own config options
        # or fetch from driver specific configuration file.
        data["share_backend_name"] = 'LVM'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = '1.0'
        #TODO(rushiagr): Pick storage_protocol from the helper used.
        data["storage_protocol"] = 'NFS_CIFS'

        data['total_capacity_gb'] = 0
        data['free_capacity_gb'] = 0
        data['reserved_percentage'] = \
            self.configuration.reserved_share_percentage
        data['QoS_support'] = False

        try:
            out, err = self._execute('vgs', '--noheadings', '--nosuffix',
                                     '--unit=G', '-o', 'name,size,free',
                                     self.configuration.share_volume_group,
                                     run_as_root=True)
        except exception.ProcessExecutionError as exc:
            LOG.error(_("Error retrieving volume status: %s") % exc.stderr)
            out = False

        if out:
            share = out.split()
            data['total_capacity_gb'] = float(share[1])
            data['free_capacity_gb'] = float(share[2])

        self._stats = data

    def deallocate_container(self, ctx, share):
        """Remove LVM volume that will be represented as share."""
        self._deallocate_container(share['name'])

    def allocate_container(self, ctx, share):
        """Create LVM volume that will be represented as share."""
        self._allocate_container(share['name'], '%sG' % share['size'])
        #create file system
        device_name = self._local_path(share)
        self._execute('mkfs.ext4', device_name, run_as_root=True)

    def allocate_container_from_snapshot(self, context, share, snapshot):
        """Is called to create share from snapshot."""
        self._allocate_container(share['name'], '%sG' % share['size'])
        self._copy_volume(self._local_path(snapshot), self._local_path(share),
                          snapshot['share_size'])

    def create_export(self, ctx, share):
        """Exports the volume. Can optionally return a Dictionary of changes
        to the share object to be persisted."""
        device_name = self._local_path(share)
        location = self._mount_device(share, device_name)
        #TODO(rushiagr): what is the provider_location? realy needed?
        return {'provider_location': location}

    def remove_export(self, ctx, share):
        """Removes an access rules for a share."""
        mount_path = self._get_mount_path(share)
        if os.path.exists(mount_path):
            #umount, may be busy
            try:
                self._execute('umount', '-f', mount_path, run_as_root=True)
            except exception.ProcessExecutionError as exc:
                if 'device is busy' in str(exc):
                    raise exception.ShareIsBusy(share_name=share['name'])
                else:
                    LOG.info('Unable to umount: %s', exc)
            #remove dir
            try:
                os.rmdir(mount_path)
            except OSError:
                LOG.info('Unable to delete %s', mount_path)

    def create_share(self, ctx, share):
        """Is called after allocate_space to create share on the volume."""
        location = self._get_mount_path(share)
        location = self._get_helper(share).create_export(location,
                                                         share['name'])
        return location

    def create_snapshot(self, context, snapshot):
        """Creates a snapshot."""
        orig_lv_name = "%s/%s" % (self.configuration.share_volume_group,
                                  snapshot['share_name'])
        self._try_execute('lvcreate', '-L', '%sG' % snapshot['share_size'],
                          '--name', snapshot['name'],
                          '--snapshot', orig_lv_name, run_as_root=True)

    def ensure_share(self, ctx, share):
        """Ensure that storage are mounted and exported."""
        device_name = self._local_path(share)
        location = self._mount_device(share, device_name)
        self._get_helper(share).create_export(location, share['name'],
                                              recreate=True)

    def delete_share(self, ctx, share):
        """Delete a share."""
        try:
            location = self._get_mount_path(share)
            self._get_helper(share).remove_export(location, share['name'])
        except exception.ProcessExecutionError:
            LOG.info("Can't remove share %r" % share['id'])
        except exception.InvalidShare as exc:
            LOG.info(exc.message)

    def delete_snapshot(self, context, snapshot):
        """Deletes a snapshot."""
        self._deallocate_container(snapshot['name'])

    def allow_access(self, ctx, share, access):
        """Allow access to the share."""
        location = self._get_mount_path(share)
        self._get_helper(share).allow_access(location, share['name'],
                                             access['access_type'],
                                             access['access_to'])

    def deny_access(self, ctx, share, access):
        """Deny access to the share."""
        location = self._get_mount_path(share)
        self._get_helper(share).deny_access(location, share['name'],
                                            access['access_type'],
                                            access['access_to'])

    def _get_helper(self, share):
        """Return the protocol helper matching the share's protocol."""
        if share['share_proto'].startswith('NFS'):
            return self._helpers['NFS']
        elif share['share_proto'].startswith('CIFS'):
            return self._helpers['CIFS']
        else:
            raise exception.InvalidShare(reason='Wrong share type')

    def _mount_device(self, share, device_name):
        """Mount LVM share and ignore if already mounted."""
        mount_path = self._get_mount_path(share)
        self._execute('mkdir', '-p', mount_path)
        try:
            self._execute('mount', device_name, mount_path,
                          run_as_root=True, check_exit_code=True)
            self._execute('chmod', '777', mount_path,
                          run_as_root=True, check_exit_code=True)
        except exception.ProcessExecutionError as exc:
            if 'already mounted' in exc.stderr:
                LOG.warn(_("%s is already mounted"), device_name)
            else:
                raise
        return mount_path

    def _get_mount_path(self, share):
        """Returns path where share is mounted."""
        return os.path.join(self.configuration.share_export_root,
                            share['name'])

    def _copy_volume(self, srcstr, deststr, size_in_g):
        # Use O_DIRECT to avoid thrashing the system buffer cache
        extra_flags = ['iflag=direct', 'oflag=direct']

        # Check whether O_DIRECT is supported
        try:
            self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
                          *extra_flags, run_as_root=True)
        except exception.ProcessExecutionError:
            extra_flags = []

        # Perform the copy
        self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
                      'count=%d' % (size_in_g * 1024), 'bs=1M',
                      *extra_flags, run_as_root=True)


class NASHelperBase(object):
    """Interface to work with share."""

    def __init__(self, execute, config_object):
        self.configuration = config_object
        self._execute = execute

    def init(self):
        """One-time environment setup; called once per helper at startup."""
        pass

    def create_export(self, local_path, share_name, recreate=False):
        """Create new export, delete old one if exists."""
        raise NotImplementedError()

    def remove_export(self, local_path, share_name):
        """Remove export."""
        raise NotImplementedError()

    def allow_access(self, local_path, share_name, access_type, access):
        """Allow access to the host."""
        raise NotImplementedError()

    def deny_access(self, local_path, share_name, access_type, access,
                    force=False):
        """Deny access to the host."""
        raise NotImplementedError()


class NFSHelper(NASHelperBase):
    """Interface to work with share."""

    def __init__(self, execute, config_object):
        super(NFSHelper, self).__init__(execute, config_object)
        # Fail fast if the NFS userland tools are not installed.
        try:
            self._execute('exportfs', check_exit_code=True,
                          run_as_root=True)
        except exception.ProcessExecutionError:
            raise exception.Error('NFS server not found')

    def create_export(self, local_path, share_name, recreate=False):
        """Create new export, delete old one if exists."""
        return ':'.join([self.configuration.share_export_ip, local_path])

    def remove_export(self, local_path, share_name):
        """Remove export."""
        pass

    def allow_access(self, local_path, share_name, access_type, access):
        """Allow access to the host"""
        if access_type != 'ip':
            reason = 'only ip access type allowed'
            raise exception.InvalidShareAccess(reason)
        #check if presents in export
        # ('err' instead of '_' so the gettext alias is not shadowed;
        # raw string so '\s' is not an invalid escape)
        out, err = self._execute('exportfs', run_as_root=True)
        out = re.search(re.escape(local_path) + r'[\s\n]*' +
                        re.escape(access), out)
        if out is not None:
            raise exception.ShareAccessExists(access_type=access_type,
                                              access=access)

        self._execute('exportfs', '-o', 'rw,no_subtree_check',
                      ':'.join([access, local_path]), run_as_root=True,
                      check_exit_code=True)

    def deny_access(self, local_path, share_name, access_type, access,
                    force=False):
        """Deny access to the host."""
        self._execute('exportfs', '-u', ':'.join([access, local_path]),
                      run_as_root=True, check_exit_code=False)


class CIFSHelper(NASHelperBase):
    """Class provides functionality to operate with cifs shares"""

    def __init__(self, execute, config_object):
        """Store executor and configuration path."""
        super(CIFSHelper, self).__init__(execute, config_object)
        self.config = self.configuration.smb_config_path
        # Scratch copy used to validate a new config before installing it.
        self.test_config = "%s_" % (self.config,)

    def init(self):
        """Initialize environment."""
        self._recreate_config()
        self._ensure_daemon_started()

    def create_export(self, local_path, share_name, recreate=False):
        """Create new export, delete old one if exists."""
        parser = ConfigParser.ConfigParser()
        parser.read(self.config)
        #delete old one
        if parser.has_section(share_name):
            if recreate:
                parser.remove_section(share_name)
            else:
                raise exception.Error('Section exists')
        #Create new one
        parser.add_section(share_name)
        parser.set(share_name, 'path', local_path)
        parser.set(share_name, 'browseable', 'yes')
        parser.set(share_name, 'guest ok', 'yes')
        parser.set(share_name, 'read only', 'no')
        parser.set(share_name, 'writable', 'yes')
        parser.set(share_name, 'create mask', '0755')
        parser.set(share_name, 'hosts deny', '0.0.0.0/0')  # denying all ips
        parser.set(share_name, 'hosts allow', '127.0.0.1')
        #NOTE(rushiagr): ensure that local_path dir is existing
        if not os.path.exists(local_path):
            os.makedirs(local_path)
        self._execute('chown', 'nobody', '-R', local_path, run_as_root=True)
        self._update_config(parser)
        return '//%s/%s' % (self.configuration.share_export_ip, share_name)

    def remove_export(self, local_path, share_name):
        """Remove export."""
        parser = ConfigParser.ConfigParser()
        parser.read(self.config)
        #delete old one
        if parser.has_section(share_name):
            parser.remove_section(share_name)
        self._update_config(parser)
        self._execute('smbcontrol', 'all', 'close-share', share_name,
                      run_as_root=True)
+ def allow_access(self, local_path, share_name, access_type, access): + """Allow access to the host.""" + if access_type != 'ip': + reason = 'only ip access type allowed' + raise exception.InvalidShareAccess(reason) + parser = ConfigParser.ConfigParser() + parser.read(self.config) + + hosts = parser.get(share_name, 'hosts allow') + if access in hosts.split(): + raise exception.ShareAccessExists(access_type=access_type, + access=access) + hosts += ' %s' % (access,) + parser.set(share_name, 'hosts allow', hosts) + self._update_config(parser) + + def deny_access(self, local_path, share_name, access_type, access, + force=False): + """Deny access to the host.""" + parser = ConfigParser.ConfigParser() + try: + parser.read(self.config) + hosts = parser.get(share_name, 'hosts allow') + hosts = hosts.replace(' %s' % (access,), '', 1) + parser.set(share_name, 'hosts allow', hosts) + self._update_config(parser) + except ConfigParser.NoSectionError: + if not force: + raise + + def _ensure_daemon_started(self): + """ + FYI: smbd starts at least two processes. 
+ """ + out, _ = self._execute(*'ps -C smbd -o args='.split(), + check_exit_code=False) + processes = [process.strip() for process in out.split('\n') + if process.strip()] + + cmd = 'smbd -s %s -D' % (self.config,) + + running = False + for process in processes: + if not process.endswith(cmd): + #alternatively exit + raise exception.Error('smbd already started with wrong config') + running = True + + if not running: + self._execute(*cmd.split(), run_as_root=True) + + def _recreate_config(self): + """create new SAMBA configuration file.""" + if os.path.exists(self.config): + os.unlink(self.config) + parser = ConfigParser.ConfigParser() + parser.add_section('global') + parser.set('global', 'security', 'user') + parser.set('global', 'server string', '%h server (Samba, Openstack)') + + self._update_config(parser, restart=False) + + def _update_config(self, parser, restart=True): + """Check if new configuration is correct and save it.""" + #Check that configuration is correct + with open(self.test_config, 'w') as fp: + parser.write(fp) + self._execute('testparm', '-s', self.test_config, + check_exit_code=True) + #save it + with open(self.config, 'w') as fp: + parser.write(fp) + #restart daemon if necessary + if restart: + self._execute(*'pkill -HUP smbd'.split(), run_as_root=True) + + +class CIFSNetConfHelper(NASHelperBase): + """Manage shares in samba server by net conf tool. + + Class provides functionality to operate with CIFS shares. Samba + server should be configured to use registry as configuration + backend to allow dynamically share managements. There are two ways + to done that, one of them is to add specific parameter in the + global configuration section at smb.conf: + + [global] + include = registry + + For more inforation see smb.conf(5). 
+ """ + + def create_export(self, local_path, share_name, recreate=False): + """Create share at samba server.""" + create_cmd = ('net', 'conf', 'addshare', share_name, local_path, + 'writeable=y', 'guest_ok=y') + try: + self._execute(*create_cmd, run_as_root=True) + except exception.ProcessExecutionError as e: + if 'already exists' in e.stderr: + if recreate: + self._execute('net', 'conf', 'delshare', share_name, + run_as_root=True) + self._execute(*create_cmd, run_as_root=True) + else: + msg = _('Share section %r already defined.') % (share_name) + raise exception.ShareBackendException(msg=msg) + else: + raise + parameters = { + 'browseable': 'yes', + 'create mask': '0755', + 'hosts deny': '0.0.0.0/0', # deny all + 'hosts allow': '127.0.0.1', + } + for name, value in parameters.items(): + self._execute('net', 'conf', 'setparm', share_name, name, value, + run_as_root=True) + return '//%s/%s' % (self.configuration.share_export_ip, share_name) + + def remove_export(self, local_path, share_name): + """Remove share definition from samba server.""" + try: + self._execute('net', 'conf', 'delshare', share_name, + run_as_root=True) + except exception.ProcessExecutionError as e: + if 'SBC_ERR_NO_SUCH_SERVICE' not in e.stderr: + raise + self._execute('smbcontrol', 'all', 'close-share', share_name, + run_as_root=True) + + def allow_access(self, local_path, share_name, access_type, access): + """Add to allow hosts additional access rule.""" + if access_type != 'ip': + reason = _('only ip access type allowed') + raise exception.InvalidShareAccess(reason=reason) + + hosts = self._get_allow_hosts(share_name) + if access in hosts: + raise exception.ShareAccessExists(access_type=access_type, + access=access) + hosts.append(access) + self._set_allow_hosts(hosts, share_name) + + def deny_access(self, local_path, share_name, access_type, access, + force=False): + """Remove from allow hosts permit rule.""" + try: + hosts = self._get_allow_hosts(share_name) + hosts.remove(access) + 
self._set_allow_hosts(hosts, share_name) + except exception.ProcessExecutionError as e: + if not ('does not exist' in e.stdout and force): + raise + + def _get_allow_hosts(self, share_name): + (out, _) = self._execute('net', 'conf', 'getparm', share_name, + 'hosts allow', run_as_root=True) + return out.split() + + def _set_allow_hosts(self, hosts, share_name): + value = ' '.join(hosts) + self._execute('net', 'conf', 'setparm', share_name, 'hosts allow', + value, run_as_root=True) diff --git a/cinder/share/drivers/netapp.py b/cinder/share/drivers/netapp.py new file mode 100644 index 0000000000..6baa83904a --- /dev/null +++ b/cinder/share/drivers/netapp.py @@ -0,0 +1,745 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +NetApp specific NAS storage driver. Supports NFS and CIFS protocols. + +This driver requires NetApp OnCommand 5.0 and one or more Data +ONTAP 7-mode storage systems with installed CIFS and NFS licenses. 
+""" +import suds +from suds.sax import text + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log +from cinder.share import driver + +from oslo.config import cfg + + +LOG = log.getLogger(__name__) + +NETAPP_NAS_OPTS = [ + cfg.StrOpt('netapp_nas_wsdl_url', + default=None, + help='URL of the WSDL file for the DFM server'), + cfg.StrOpt('netapp_nas_login', + default=None, + help='User name for the DFM server'), + cfg.StrOpt('netapp_nas_password', + default=None, + help='Password for the DFM server'), + cfg.StrOpt('netapp_nas_server_hostname', + default=None, + help='Hostname for the DFM server'), + cfg.IntOpt('netapp_nas_server_port', + default=8088, + help='Port number for the DFM server'), + cfg.BoolOpt('netapp_nas_server_secure', + default=True, + help='Use secure connection to server.'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(NETAPP_NAS_OPTS) + + +class NetAppShareDriver(driver.ShareDriver): + """ + NetApp specific NAS driver. Allows for NFS and CIFS NAS storage usage. 
+ """ + + def __init__(self, db, *args, **kwargs): + super(NetAppShareDriver, self).__init__(*args, **kwargs) + self.db = db + self._helpers = None + self._share_table = {} + self.configuration.append_config_values(NETAPP_NAS_OPTS) + self._client = NetAppApiClient(self.configuration) + + def allocate_container(self, context, share): + """Allocate space for the share on aggregates.""" + aggregate = self._find_best_aggregate() + filer = aggregate.FilerId + self._allocate_share_space(aggregate, share) + self._remember_share(share['id'], filer) + + def allocate_container_from_snapshot(self, context, share, snapshot): + """Creates a share from a snapshot.""" + share_name = _get_valid_share_name(share['id']) + parent_share_name = _get_valid_share_name(snapshot['share_id']) + parent_snapshot_name = _get_valid_snapshot_name(snapshot['id']) + + filer = self._get_filer(snapshot['share_id']) + + xml_args = ('%s' + '%s' + '%s') % \ + (share_name, parent_share_name, parent_snapshot_name) + self._client.send_request_to(filer, 'volume-clone-create', xml_args) + self._remember_share(share['id'], filer) + + def deallocate_container(self, context, share): + """Free share space.""" + target = self._get_filer(share['id']) + if target: + self._share_offline(target, share) + self._delete_share(target, share) + self._forget_share(share['id']) + + def create_share(self, context, share): + """Creates NAS storage.""" + helper = self._get_helper(share) + filer = self._get_filer(share['id']) + export_location = helper.create_share(filer, share) + return export_location + + def create_snapshot(self, context, snapshot): + """Creates a snapshot of a share.""" + share_name = _get_valid_share_name(snapshot['share_id']) + snapshot_name = _get_valid_snapshot_name(snapshot['id']) + + filer = self._get_filer(snapshot['share_id']) + + xml_args = ('%s' + '%s') % (share_name, snapshot_name) + self._client.send_request_to(filer, 'snapshot-create', xml_args) + + def delete_share(self, context, share): + 
"""Deletes NAS storage.""" + helper = self._get_helper(share) + target = helper.get_target(share) + # share may be in error state, so there's no share and target + if target: + helper.delete_share(share) + + def delete_snapshot(self, context, snapshot): + """Deletes a snapshot of a share.""" + share_name = _get_valid_share_name(snapshot['share_id']) + snapshot_name = _get_valid_snapshot_name(snapshot['id']) + + filer = self._get_filer(snapshot['share_id']) + + self._is_snapshot_busy(filer, share_name, snapshot_name) + xml_args = ('%s' + '%s') % (snapshot_name, share_name) + self._client.send_request_to(filer, 'snapshot-delete', xml_args) + + def create_export(self, context, share): + """Share already exported.""" + pass + + def remove_export(self, context, share): + """Share already removed.""" + pass + + def ensure_share(self, context, share): + """Remember previously created shares.""" + helper = self._get_helper(share) + filer = helper.get_target(share) + self._remember_share(share['id'], filer) + + def allow_access(self, context, share, access): + """Allows access to a given NAS storage for IPs in :access:""" + helper = self._get_helper(share) + return helper.allow_access(context, share, access) + + def deny_access(self, context, share, access): + """Denies access to a given NAS storage for IPs in :access:""" + helper = self._get_helper(share) + return helper.deny_access(context, share, access) + + def do_setup(self, context): + """Prepare once the driver. + + Called once by the manager after the driver is loaded. + Validate the flags we care about and setup the suds (web + services) client. 
+ """ + self._client.do_setup() + self._setup_helpers() + + def check_for_setup_error(self): + """Raises error if prerequisites are not met.""" + self._client.check_configuration(self.configuration) + + def _get_filer(self, share_id): + """Returns filer name for the share_id.""" + try: + return self._share_table[share_id] + except KeyError: + return + + def _remember_share(self, share_id, filer): + """Stores required share info in local state.""" + self._share_table[share_id] = filer + + def _forget_share(self, share_id): + """Remove share info about share.""" + try: + self._share_table.pop(share_id) + except KeyError: + pass + + def _share_offline(self, target, share): + """Sends share offline. Required before deleting a share.""" + share_name = _get_valid_share_name(share['id']) + xml_args = ('%s') % share_name + self._client.send_request_to(target, 'volume-offline', xml_args) + + def _delete_share(self, target, share): + """Destroys share on a target OnTap device.""" + share_name = _get_valid_share_name(share['id']) + xml_args = ('true' + '%s') % share_name + self._client.send_request_to(target, 'volume-destroy', xml_args) + + def _setup_helpers(self): + """Initializes protocol-specific NAS drivers.""" + #TODO(rushiagr): better way to handle configuration instead of just + # passing to the helper + self._helpers = { + 'CIFS': NetAppCIFSHelper(self._client, + self.configuration), + 'NFS': NetAppNFSHelper(self._client, + self.configuration), + } + + def _get_helper(self, share): + """Returns driver which implements share protocol.""" + share_proto = share['share_proto'] + + for proto in self._helpers.keys(): + if share_proto.upper().startswith(proto): + return self._helpers[proto] + + err_msg = _("Invalid NAS protocol supplied: %s. 
") % (share_proto) + + raise exception.Error(err_msg) + + def _find_best_aggregate(self): + """Returns aggregate with the most free space left.""" + aggrs = self._client.get_available_aggregates() + if aggrs is None: + raise exception.Error(_("No aggregates available")) + + best_aggregate = max(aggrs.Aggregates.AggregateInfo, + key=lambda ai: ai.AggregateSize.SizeAvailable) + return best_aggregate + + def _allocate_share_space(self, aggregate, share): + """Create new share on aggregate.""" + filer_id = aggregate.FilerId + aggr_name = aggregate.AggregateName.split(':')[1] + share_name = _get_valid_share_name(share['id']) + args_xml = ('%s' + '%dg' + '%s') % (aggr_name, share['size'], + share_name) + self._client.send_request_to(filer_id, 'volume-create', args_xml) + + def _is_snapshot_busy(self, filer, share_name, snapshot_name): + """Raises ShareSnapshotIsBusy if snapshot is busy.""" + xml_args = ('%s') % share_name + snapshots = self._client.send_request_to(filer, + 'snapshot-list-info', + xml_args, + do_response_check=False) + + for snap in snapshots.Results.snapshots[0]['snapshot-info']: + if snap['name'][0] == snapshot_name and snap['busy'][0] == 'true': + raise exception.ShareSnapshotIsBusy( + snapshot_name=snapshot_name) + + def get_share_stats(self, refresh=False): + """Get share status. + + If 'refresh' is True, run update the stats first.""" + if refresh: + self._update_share_status() + + return self._stats + + def _update_share_status(self): + """Retrieve status info from share volume group.""" + + LOG.debug(_("Updating share status")) + data = {} + + # Note(zhiteng): These information are driver/backend specific, + # each driver may define these values in its own config options + # or fetch from driver specific configuration file. + data["share_backend_name"] = 'NetApp_7_mode' + data["vendor_name"] = 'NetApp' + data["driver_version"] = '1.0' + #TODO(rushiagr): Pick storage_protocol from the helper used. 
+ data["storage_protocol"] = 'NFS_CIFS' + + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 0 + data['QoS_support'] = False + + self._stats = data + + +def _check_response(request, response): + """Checks RPC responses from NetApp devices.""" + if response.Status == 'failed': + name = request.Name + reason = response.Reason + msg = _('API %(name)s failed: %(reason)s') + raise exception.Error(msg % locals()) + + +def _get_valid_share_name(share_id): + """The name can contain letters, numbers, and the underscore + character (_). The first character must be a letter or an + underscore.""" + return 'share_' + share_id.replace('-', '_') + + +def _get_valid_snapshot_name(snapshot_id): + """The name can contain letters, numbers, and the underscore + character (_). The first character must be a letter or an + underscore.""" + return 'share_snapshot_' + snapshot_id.replace('-', '_') + + +class NetAppApiClient(object): + """Wrapper around DFM commands.""" + + REQUIRED_FLAGS = ['netapp_nas_wsdl_url', + 'netapp_nas_login', + 'netapp_nas_password', + 'netapp_nas_server_hostname', + 'netapp_nas_server_port'] + + def __init__(self, configuration): + self.configuration = configuration + self._client = None + + def do_setup(self): + """Setup suds (web services) client.""" + protocol = 'https' if self.configuration.netapp_nas_server_secure \ + else 'http' + soap_url = ('%s://%s:%s/apis/soap/v1' % + (protocol, + self.configuration.netapp_nas_server_hostname, + self.configuration.netapp_nas_server_port)) + + self._client = \ + suds.client.Client(self.configuration.netapp_nas_wsdl_url, + username=self.configuration.netapp_nas_login, + password=self.configuration.netapp_nas_password, + location=soap_url) + + LOG.info('NetApp RPC client started') + + def send_request_to(self, target, request, xml_args=None, + do_response_check=True): + """ + Sends RPC :request: to :target:. 
+ :param target: IP address, ID or network name of OnTap device + :param request: API name + :param xml_args: call arguments + :param do_response_check: if set to True and RPC call has failed, + raises exception. + """ + client = self._client + srv = client.service + + rpc = client.factory.create('Request') + rpc.Name = request + rpc.Args = text.Raw(xml_args) + response = srv.ApiProxy(Request=rpc, Target=target) + + if do_response_check: + _check_response(rpc, response) + + return response + + def get_available_aggregates(self): + """Returns list of aggregates known by DFM.""" + srv = self._client.service + resp = srv.AggregateListInfoIterStart() + tag = resp.Tag + + try: + avail_aggrs = srv.AggregateListInfoIterNext(Tag=tag, + Maximum=resp.Records) + finally: + srv.AggregateListInfoIterEnd(tag) + + return avail_aggrs + + def get_host_ip_by(self, host_id): + """Returns IP address of a host known by DFM.""" + if (type(host_id) is str or type(host_id) is unicode) and \ + len(host_id.split('.')) == 4: + # already IP + return host_id + + client = self._client + srv = client.service + + filer_filter = client.factory.create('HostListInfoIterStart') + filer_filter.ObjectNameOrId = host_id + resp = srv.HostListInfoIterStart(HostListInfoIterStart=filer_filter) + tag = resp.Tag + + try: + filers = srv.HostListInfoIterNext(Tag=tag, Maximum=resp.Records) + finally: + srv.HostListInfoIterEnd(Tag=tag) + + ip = None + for host in filers.Hosts.HostInfo: + if int(host.HostId) == int(host_id): + ip = host.HostAddress + + return ip + + @staticmethod + def check_configuration(config_object): + """Ensure that the flags we care about are set.""" + for flag in NetAppApiClient.REQUIRED_FLAGS: + if not getattr(config_object, flag, None): + raise exception.Error(_('%s is not set') % flag) + + +class NetAppNASHelperBase(object): + """Interface for protocol-specific NAS drivers.""" + def __init__(self, suds_client, config_object): + self.configuration = config_object + self._client = 
suds_client + + def create_share(self, target_id, share): + """Creates NAS share.""" + raise NotImplementedError() + + def delete_share(self, share): + """Deletes NAS share.""" + raise NotImplementedError() + + def allow_access(self, context, share, new_rules): + """Allows new_rules to a given NAS storage for IPs in :new_rules.""" + raise NotImplementedError() + + def deny_access(self, context, share, new_rules): + """Denies new_rules to a given NAS storage for IPs in :new_rules:.""" + raise NotImplementedError() + + def get_target(self, share): + """Returns host where the share located..""" + raise NotImplementedError() + + +class NetAppNFSHelper(NetAppNASHelperBase): + """Netapp specific NFS sharing driver.""" + + def __init__(self, suds_client, config_object): + self.configuration = config_object + super(NetAppNFSHelper, self).__init__(suds_client, config_object) + + def create_share(self, target_id, share): + """Creates NFS share""" + args_xml = ('' + '' + '%s' + '' + '' + '' + '' + 'localhost' + '' + '' + '' + '' + 'false' + 'localhost' + '' + '' + '' + '' + '' + '') + + client = self._client + valid_share_name = _get_valid_share_name(share['id']) + export_pathname = '/vol/' + valid_share_name + + client.send_request_to(target_id, 'nfs-exportfs-append-rules-2', + args_xml % export_pathname) + + export_ip = client.get_host_ip_by(target_id) + export_location = ':'.join([export_ip, export_pathname]) + return export_location + + def delete_share(self, share): + """Deletes NFS share.""" + target, export_path = self._get_export_path(share) + + xml_args = ('' + '' + '%s' + '' + '') % export_path + + self._client.send_request_to(target, 'nfs-exportfs-delete-rules', + xml_args) + + def allow_access(self, context, share, access): + """Allows access to a given NFS storage for IPs in :access:.""" + if access['access_type'] != 'ip': + raise exception.Error(('Invalid access type supplied. 
' + 'Only \'ip\' type is supported')) + + ips = access['access_to'] + + existing_rules = self._get_exisiting_rules(share) + new_rules_xml = self._append_new_rules_to(existing_rules, ips) + + self._modify_rule(share, new_rules_xml) + + def deny_access(self, context, share, access): + """Denies access to a given NFS storage for IPs in :access:.""" + denied_ips = access['access_to'] + existing_rules = self._get_exisiting_rules(share) + + if type(denied_ips) is not list: + denied_ips = [denied_ips] + + for deny_rule in denied_ips: + try: + existing_rules.remove(deny_rule) + except ValueError: + pass + + new_rules_xml = self._append_new_rules_to([], existing_rules) + self._modify_rule(share, new_rules_xml) + + def get_target(self, share): + """Returns ID of target OnTap device based on export location.""" + return self._get_export_path(share)[0] + + def _modify_rule(self, share, rw_rules): + """Modifies access rule for a share.""" + target, export_path = self._get_export_path(share) + + xml_args = ('true' + '' + '' + '%s' + '%s' + '' + '' + '') % (export_path, ''.join(rw_rules)) + + self._client.send_request_to(target, 'nfs-exportfs-append-rules-2', + xml_args) + + def _get_exisiting_rules(self, share): + """Returns available access rules for the share.""" + target, export_path = self._get_export_path(share) + xml_args = '%s' % export_path + + response = self._client.send_request_to(target, + 'nfs-exportfs-list-rules-2', + xml_args) + + rules = response.Results.rules[0] + security_rule = rules['exports-rule-info-2'][0]['security-rules'][0] + security_info = security_rule['security-rule-info'][0] + root_rules = security_info['root'][0] + allowed_hosts = root_rules['exports-hostname-info'] + + existing_rules = [] + + for allowed_host in allowed_hosts: + if 'name' in allowed_host: + existing_rules.append(allowed_host['name'][0]) + + return existing_rules + + @staticmethod + def _append_new_rules_to(existing_rules, new_rules): + """Adds new rules to existing.""" + 
security_rule_xml = ('' + '%s' + '' + '%s' + '' + '') + + hostname_info_xml = ('' + '%s' + '') + + allowed_hosts_xml = [] + + if type(new_rules) is not list: + new_rules = [new_rules] + + all_rules = existing_rules + new_rules + + for ip in all_rules: + allowed_hosts_xml.append(hostname_info_xml % ip) + + return security_rule_xml % (allowed_hosts_xml, allowed_hosts_xml) + + @staticmethod + def _get_export_path(share): + """Returns IP address and export location of a share.""" + export_location = share['export_location'] + + if export_location is None: + export_location = ':' + + return export_location.split(':') + + +class NetAppCIFSHelper(NetAppNASHelperBase): + """Netapp specific NFS sharing driver.""" + + CIFS_USER_GROUP = 'Administrators' + + def __init__(self, suds_client, config_object): + self.configuration = config_object + super(NetAppCIFSHelper, self).__init__(suds_client, config_object) + + def create_share(self, target_id, share): + """Creates CIFS storage.""" + cifs_status = self._get_cifs_status(target_id) + + if cifs_status == 'stopped': + self._start_cifs_service(target_id) + + share_name = _get_valid_share_name(share['id']) + + self._set_qtree_security(target_id, share) + self._add_share(target_id, share_name) + self._restrict_access(target_id, 'everyone', share_name) + + ip_address = self._client.get_host_ip_by(target_id) + + cifs_location = self._set_export_location(ip_address, share_name) + + return cifs_location + + def delete_share(self, share): + """Deletes CIFS storage.""" + host_ip, share_name = self._get_export_location(share) + xml_args = '%s' % share_name + self._client.send_request_to(host_ip, 'cifs-share-delete', xml_args) + + def allow_access(self, context, share, access): + """Allows access to a given CIFS storage for IPs in :access:.""" + if access['access_type'] != 'passwd': + ex_text = ('NetApp only supports "passwd" access type for CIFS.') + raise exception.Error(ex_text) + + user = access['access_to'] + target, share_name = 
self._get_export_location(share) + + if self._user_exists(target, user): + self._allow_access_for(target, user, share_name) + else: + exc_text = ('User "%s" does not exist on %s OnTap.') % (user, + target) + raise exception.Error(exc_text) + + def deny_access(self, context, share, access): + """Denies access to a given CIFS storage for IPs in access.""" + host_ip, share_name = self._get_export_location(share) + user = access['access_to'] + + self._restrict_access(host_ip, user, share_name) + + def get_target(self, share): + """Returns OnTap target IP based on share export location.""" + return self._get_export_location(share)[0] + + def _set_qtree_security(self, target, share): + client = self._client + share_name = '/vol/' + _get_valid_share_name(share['id']) + + xml_args = ('' + 'qtree' + 'security' + '%s' + 'mixed' + '') % share_name + + client.send_request_to(target, 'system-cli', xml_args) + + def _restrict_access(self, target, user_name, share_name): + xml_args = ('%s' + '%s') % (user_name, share_name) + self._client.send_request_to(target, 'cifs-share-ace-delete', + xml_args) + + def _start_cifs_service(self, target_id): + """Starts CIFS service on OnTap target.""" + client = self._client + return client.send_request_to(target_id, 'cifs-start', + do_response_check=False) + + @staticmethod + def _get_export_location(share): + """Returns export location for a given CIFS share.""" + export_location = share['export_location'] + + if export_location is None: + export_location = '///' + + _, _, host_ip, share_name = export_location.split('/') + return host_ip, share_name + + @staticmethod + def _set_export_location(ip, share_name): + """Returns export location of a share.""" + return "//%s/%s" % (ip, share_name) + + def _get_cifs_status(self, target_id): + """Returns status of a CIFS service on target OnTap.""" + client = self._client + response = client.send_request_to(target_id, 'cifs-status') + return response.Status + + def _allow_access_for(self, target, 
username, share_name): + """Allows access to the CIFS share for a given user.""" + xml_args = ('rwx' + '%s' + '%s') % (share_name, username) + self._client.send_request_to(target, 'cifs-share-ace-set', xml_args) + + def _user_exists(self, target, user): + """Returns True if user already exists on a target OnTap.""" + xml_args = ('%s') % user + resp = self._client.send_request_to(target, + 'useradmin-user-list', + xml_args, + do_response_check=False) + + return (resp.Status == 'passed') + + def _add_share(self, target_id, share_name): + """Creates CIFS share on target OnTap host.""" + client = self._client + xml_args = ('/vol/%s' + '%s') % (share_name, share_name) + client.send_request_to(target_id, 'cifs-share-add', xml_args) diff --git a/cinder/share/manager.py b/cinder/share/manager.py new file mode 100644 index 0000000000..18232f0446 --- /dev/null +++ b/cinder/share/manager.py @@ -0,0 +1,221 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""NAS share manager managers creating shares and access rights. + +**Related Flags** + +:share_driver: Used by :class:`ShareManager`. Defaults to + :class:`cinder.share.drivers.lvm.LVMShareDriver`. 
+""" + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import manager +from cinder.openstack.common import excutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder.share.configuration import Configuration + +from oslo.config import cfg + +LOG = logging.getLogger(__name__) + +share_manager_opts = [ + cfg.StrOpt('share_driver', + default='cinder.share.drivers.lvm.LVMShareDriver', + help='Driver to use for share creation'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(share_manager_opts) + + +class ShareManager(manager.SchedulerDependentManager): + """Manages NAS storages.""" + + RPC_API_VERSION = '1.1' + + def __init__(self, share_driver=None, service_name=None, *args, **kwargs): + """Load the driver from args, or from flags.""" + self.configuration = Configuration(share_manager_opts, + config_group=service_name) + super(ShareManager, self).__init__(service_name='share', + *args, **kwargs) + if not share_driver: + share_driver = self.configuration.share_driver + self.driver = importutils.import_object( + share_driver, + self.db, + configuration=self.configuration) + + def init_host(self): + """Initialization for a standalone service.""" + + ctxt = context.get_admin_context() + self.driver.do_setup(ctxt) + self.driver.check_for_setup_error() + + shares = self.db.share_get_all_by_host(ctxt, self.host) + LOG.debug(_("Re-exporting %s shares"), len(shares)) + for share in shares: + if share['status'] in ['available', 'in-use']: + self.driver.ensure_share(ctxt, share) + rules = self.db.share_access_get_all_for_share(ctxt, + share['id']) + for access_ref in rules: + if access_ref['state'] == access_ref.STATE_ACTIVE: + try: + self.driver.allow_access(ctxt, share, + access_ref) + except exception.ShareAccessExists: + pass + else: + LOG.info(_("share %s: skipping export"), share['name']) + + 
self.publish_service_capabilities(ctxt) + + def create_share(self, context, share_id, request_spec=None, + filter_properties=None, snapshot_id=None): + """Creates a share.""" + context = context.elevated() + if filter_properties is None: + filter_properties = {} + + share_ref = self.db.share_get(context, share_id) + if snapshot_id is not None: + snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) + else: + snapshot_ref = None + + try: + if snapshot_ref: + self.driver.allocate_container_from_snapshot(context, + share_ref, + snapshot_ref) + else: + self.driver.allocate_container(context, share_ref) + export_location = self.driver.create_share(context, share_ref) + self.db.share_update(context, share_id, + {'export_location': export_location}) + self.driver.create_export(context, share_ref) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_update(context, share_id, {'status': 'error'}) + else: + self.db.share_update(context, share_id, + {'status': 'available', + 'launched_at': timeutils.utcnow()}) + + def delete_share(self, context, share_id): + """Delete a share.""" + share_ref = self.db.share_get(context, share_id) + rules = self.db.share_access_get_all_for_share(context, share_id) + try: + for access_ref in rules: + self._deny_access(context, access_ref, share_ref) + self.driver.remove_export(context, share_ref) + self.driver.delete_share(context, share_ref) + self.driver.deallocate_container(context, share_ref) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_update(context, share_id, + {'status': 'error_deleting'}) + else: + self.db.share_delete(context, share_id) + + def create_snapshot(self, context, share_id, snapshot_id): + """Create snapshot for share.""" + snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) + + try: + snap_name = snapshot_ref['name'] + model_update = self.driver.create_snapshot(context, snapshot_ref) + if model_update: + 
self.db.share_snapshot_update(context, snapshot_ref['id'], + model_update) + + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_snapshot_update(context, + snapshot_ref['id'], + {'status': 'error'}) + + self.db.share_snapshot_update(context, + snapshot_ref['id'], + {'status': 'available', + 'progress': '100%'}) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Delete share snapshot.""" + snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) + + try: + self.driver.delete_snapshot(context, snapshot_ref) + except exception.SnapshotIsBusy: + self.db.share_snapshot_update(context, snapshot_ref['id'], + {'status': 'available'}) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_snapshot_update(context, snapshot_ref['id'], + {'status': 'error_deleting'}) + else: + self.db.share_snapshot_destroy(context, snapshot_id) + + def allow_access(self, context, access_id): + """Allow access to some share.""" + try: + access_ref = self.db.share_access_get(context, access_id) + share_ref = self.db.share_get(context, access_ref['share_id']) + if access_ref['state'] == access_ref.STATE_NEW: + self.driver.allow_access(context, share_ref, access_ref) + self.db.share_access_update( + context, access_id, {'state': access_ref.STATE_ACTIVE}) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_access_update( + context, access_id, {'state': access_ref.STATE_ERROR}) + + def deny_access(self, context, access_id): + """Deny access to some share.""" + access_ref = self.db.share_access_get(context, access_id) + share_ref = self.db.share_get(context, access_ref['share_id']) + self._deny_access(context, access_ref, share_ref) + + def _deny_access(self, context, access_ref, share_ref): + access_id = access_ref['id'] + try: + self.driver.deny_access(context, share_ref, access_ref) + except Exception: + with excutils.save_and_reraise_exception(): + 
self.db.share_access_update( + context, access_id, {'state': access_ref.STATE_ERROR}) + self.db.share_access_delete(context, access_id) + + @manager.periodic_task + def _report_driver_status(self, context): + LOG.info(_('Updating share status')) + share_stats = self.driver.get_share_stats(refresh=True) + if share_stats: + self.update_service_capabilities(share_stats) + + def publish_service_capabilities(self, context): + """Collect driver status and then publish it.""" + self._report_driver_status(context) + self._publish_service_capabilities(context) diff --git a/cinder/share/rpcapi.py b/cinder/share/rpcapi.py new file mode 100644 index 0000000000..4347ebc7b3 --- /dev/null +++ b/cinder/share/rpcapi.py @@ -0,0 +1,93 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Intel, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the share RPC API. +""" + +from cinder import exception +from cinder import flags +from cinder.openstack.common import rpc +import cinder.openstack.common.rpc.proxy + + +FLAGS = flags.FLAGS + + +class ShareAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + '''Client side of the share rpc API. + + API version history: + + 1.0 - Initial version. + 1.1 - Add snapshot support. 
+ 1.2 - Add filter scheduler support + ''' + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic=None): + super(ShareAPI, self).__init__( + topic=topic or FLAGS.share_topic, + default_version=self.BASE_RPC_API_VERSION) + + def create_share(self, ctxt, share, host, + request_spec, filter_properties, + snapshot_id=None): + self.cast(ctxt, + self.make_msg('create_share', + share_id=share['id'], + request_spec=request_spec, + filter_properties=filter_properties, + snapshot_id=snapshot_id), + topic=rpc.queue_get_for(ctxt, + self.topic, + host)) + + def delete_share(self, ctxt, share): + self.cast(ctxt, + self.make_msg('delete_share', + share_id=share['id']), + topic=rpc.queue_get_for(ctxt, self.topic, share['host'])) + + def create_snapshot(self, ctxt, share, snapshot): + self.cast(ctxt, + self.make_msg('create_snapshot', + share_id=share['id'], + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, share['host'])) + + def delete_snapshot(self, ctxt, snapshot, host): + self.cast(ctxt, + self.make_msg('delete_snapshot', + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, host)) + + def allow_access(self, ctxt, share, access): + self.cast(ctxt, self.make_msg('allow_access', access_id=access['id']), + topic=rpc.queue_get_for(ctxt, + self.topic, + share['host'])) + + def deny_access(self, ctxt, share, access): + self.cast(ctxt, self.make_msg('deny_access', access_id=access['id']), + topic=rpc.queue_get_for(ctxt, + self.topic, + share['host'])) + + def publish_service_capabilities(self, ctxt): + self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'), + version='1.0') diff --git a/cinder/test.py b/cinder/test.py new file mode 100644 index 0000000000..967eadca37 --- /dev/null +++ b/cinder/test.py @@ -0,0 +1,285 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base classes for our unit tests. + +Allows overriding of flags for use of fakes, and some black magic for +inline callbacks. + +""" + +import functools +import unittest +import uuid + +import mox +import nose.plugins.skip +from oslo.config import cfg +import stubout + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import service +from cinder import tests +from cinder.tests import fake_flags + +test_opts = [ + cfg.StrOpt('sqlite_clean_db', + default='clean.sqlite', + help='File name of clean sqlite db'), + cfg.BoolOpt('fake_tests', + default=True, + help='should we use everything for testing'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(test_opts) + +LOG = logging.getLogger(__name__) + + +class skip_test(object): + """Decorator that skips a test.""" + # TODO(tr3buchet): remember forever what comstud did here + def __init__(self, msg): + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + raise nose.SkipTest(self.message) + return _skipper + + +class skip_if(object): + """Decorator that skips a test if condition is true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper 
function.""" + if self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +class skip_unless(object): + """Decorator that skips a test if condition is not true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if not self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +def skip_if_fake(func): + """Decorator that skips a test if running in fake mode.""" + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if FLAGS.fake_tests: + raise unittest.SkipTest('Test cannot be run in fake mode') + else: + return func(*args, **kw) + return _skipper + + +class TestingException(Exception): + pass + + +class TestCase(unittest.TestCase): + """Test case base class for all unit tests.""" + + def setUp(self): + """Run before each test method to initialize test environment.""" + super(TestCase, self).setUp() + + fake_flags.set_defaults(FLAGS) + flags.parse_args([], default_config_files=[]) + + # NOTE(vish): We need a better method for creating fixtures for tests + # now that we have some required db setup for the system + # to work properly. 
+ self.start = timeutils.utcnow() + tests.reset_db() + + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = stubout.StubOutForTesting() + self.injected = [] + self._services = [] + FLAGS.set_override('fatal_exception_format_errors', True) + + def tearDown(self): + """Runs after each test method to tear down test environment.""" + try: + self.mox.UnsetStubs() + self.stubs.UnsetAll() + self.stubs.SmartUnsetAll() + self.mox.VerifyAll() + super(TestCase, self).tearDown() + finally: + # Reset any overridden flags + FLAGS.reset() + + # Stop any timers + for x in self.injected: + try: + x.stop() + except AssertionError: + pass + + # Kill any services + for x in self._services: + try: + x.kill() + except Exception: + pass + + # Delete attributes that don't start with _ so they don't pin + # memory around unnecessarily for the duration of the test + # suite + for key in [k for k in self.__dict__.keys() if k[0] != '_']: + del self.__dict__[key] + + def flags(self, **kw): + """Override flag variables for a test.""" + for k, v in kw.iteritems(): + FLAGS.set_override(k, v) + + def start_service(self, name, host=None, **kwargs): + host = host and host or uuid.uuid4().hex + kwargs.setdefault('host', host) + kwargs.setdefault('binary', 'cinder-%s' % name) + svc = service.Service.create(**kwargs) + svc.start() + self._services.append(svc) + return svc + + # Useful assertions + def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): + """Assert two dicts are equivalent. + + This is a 'deep' match in the sense that it handles nested + dictionaries appropriately. + + NOTE: + + If you don't care (or don't know) a given value, you can specify + the string DONTCARE as the value. This will cause that dict-item + to be skipped. + + """ + def raise_assertion(msg): + d1str = str(d1) + d2str = str(d2) + base_msg = ('Dictionaries do not match. 
%(msg)s d1: %(d1str)s ' + 'd2: %(d2str)s' % locals()) + raise AssertionError(base_msg) + + d1keys = set(d1.keys()) + d2keys = set(d2.keys()) + if d1keys != d2keys: + d1only = d1keys - d2keys + d2only = d2keys - d1keys + raise_assertion('Keys in d1 and not d2: %(d1only)s. ' + 'Keys in d2 and not d1: %(d2only)s' % locals()) + + for key in d1keys: + d1value = d1[key] + d2value = d2[key] + try: + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance + except (ValueError, TypeError): + # If both values aren't convertable to float, just ignore + # ValueError if arg is a str, TypeError if it's something else + # (like None) + within_tolerance = False + + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): + self.assertDictMatch(d1value, d2value) + elif 'DONTCARE' in (d1value, d2value): + continue + elif approx_equal and within_tolerance: + continue + elif d1value != d2value: + raise_assertion("d1['%(key)s']=%(d1value)s != " + "d2['%(key)s']=%(d2value)s" % locals()) + + def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): + """Assert a list of dicts are equivalent.""" + def raise_assertion(msg): + L1str = str(L1) + L2str = str(L2) + base_msg = ('List of dictionaries do not match: %(msg)s ' + 'L1: %(L1str)s L2: %(L2str)s' % locals()) + raise AssertionError(base_msg) + + L1count = len(L1) + L2count = len(L2) + if L1count != L2count: + raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' + 'len(L2)=%(L2count)d' % locals()) + + for d1, d2 in zip(L1, L2): + self.assertDictMatch(d1, d2, approx_equal=approx_equal, + tolerance=tolerance) + + def assertSubDictMatch(self, sub_dict, super_dict): + """Assert a sub_dict is subset of super_dict.""" + self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) + for k, sub_value in sub_dict.items(): + super_value = super_dict[k] + if isinstance(sub_value, dict): + self.assertSubDictMatch(sub_value, super_value) + elif 'DONTCARE' in (sub_value, super_value): + 
continue + else: + self.assertEqual(sub_value, super_value) + + def assertIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. Assert 'a' in 'b'""" + try: + f = super(TestCase, self).assertIn + except AttributeError: + self.assertTrue(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) + + def assertNotIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. Assert 'a' NOT in 'b'""" + try: + f = super(TestCase, self).assertNotIn + except AttributeError: + self.assertFalse(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) diff --git a/cinder/testing/README.rst b/cinder/testing/README.rst new file mode 100644 index 0000000000..9a9d314621 --- /dev/null +++ b/cinder/testing/README.rst @@ -0,0 +1,66 @@ +===================================== +OpenStack Cinder Testing Infrastructure +===================================== + +A note of clarification is in order, to help those who are new to testing in +OpenStack cinder: + +- actual unit tests are created in the "tests" directory; +- the "testing" directory is used to house the infrastructure needed to support + testing in OpenStack Cinder. + +This README file attempts to provide current and prospective contributors with +everything they need to know in order to start creating unit tests and +utilizing the convenience code provided in cinder.testing. + +Note: the content for the rest of this file will be added as the work items in +the following blueprint are completed: + https://blueprints.launchpad.net/cinder/+spec/consolidate-testing-infrastructure + + +Test Types: Unit vs. Functional vs. Integration +----------------------------------------------- + +TBD + +Writing Unit Tests +------------------ + +TBD + +Using Fakes +~~~~~~~~~~~ + +TBD + +test.TestCase +------------- +The TestCase class from cinder.test (generally imported as test) will +automatically manage self.stubs using the stubout module and self.mox +using the mox module during the setUp step. 
They will automatically +verify and clean up during the tearDown step. + +If using test.TestCase, calling the super class setUp is required and +calling the super class tearDown is required to be last if tearDown +is overridden. + +Writing Functional Tests +------------------------ + +TBD + +Writing Integration Tests +------------------------- + +TBD + +Tests and assertRaises +---------------------- +When asserting that a test should raise an exception, test against the +most specific exception possible. An overly broad exception type (like +Exception) can mask errors in the unit test itself. + +Example:: + + self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, + elevated, instance_uuid) diff --git a/cinder/tests/__init__.py b/cinder/tests/__init__.py new file mode 100644 index 0000000000..5027886ad6 --- /dev/null +++ b/cinder/tests/__init__.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.tests` -- Cinder Unittests +===================================================== + +.. automodule:: cinder.tests + :platform: Unix +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. 
moduleauthor:: Andy Smith +""" + +import eventlet +eventlet.monkey_patch() + +# See http://code.google.com/p/python-nose/issues/detail?id=373 +# The code below enables nosetests to work with i18n _() blocks +import __builtin__ +setattr(__builtin__, '_', lambda x: x) +import os +import shutil + +from cinder.db.sqlalchemy.session import get_engine +from cinder import flags + +FLAGS = flags.FLAGS + +_DB = None + + +def reset_db(): + if FLAGS.sql_connection == "sqlite://": + engine = get_engine() + engine.dispose() + conn = engine.connect() + conn.connection.executescript(_DB) + else: + shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), + os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) + + +def setup(): + import mox # Fail fast if you don't have mox. Workaround for bug 810424 + + from cinder.db import migration + from cinder.tests import fake_flags + fake_flags.set_defaults(FLAGS) + + if FLAGS.sql_connection == "sqlite://": + if migration.db_version() > 1: + return + else: + testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) + if os.path.exists(testdb): + return + migration.db_sync() + + if FLAGS.sql_connection == "sqlite://": + global _DB + engine = get_engine() + conn = engine.connect() + _DB = "".join(line for line in conn.connection.iterdump()) + else: + cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) + shutil.copyfile(testdb, cleandb) diff --git a/cinder/tests/api/__init__.py b/cinder/tests/api/__init__.py new file mode 100644 index 0000000000..3be5ce944c --- /dev/null +++ b/cinder/tests/api/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/common.py b/cinder/tests/api/common.py new file mode 100644 index 0000000000..e030105653 --- /dev/null +++ b/cinder/tests/api/common.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
def compare_links(actual, expected):
    """Compare xml atom links.

    :param actual: iterable of parsed lxml.etree link elements
    :param expected: list of dicts with the expected 'rel'/'href'/'type'
    :returns: True only if every link matches positionally
    """
    return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type'))


def compare_media_types(actual, expected):
    """Compare xml media types.

    :param actual: iterable of parsed lxml.etree media-type elements
    :param expected: list of dicts with the expected 'base'/'type'
    :returns: True only if every media type matches positionally
    """
    return compare_tree_to_dict(actual, expected, ('base', 'type'))


def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts.

    Each element of *actual* is matched positionally against the dict at
    the same index of *expected*; only the attributes named in *keys*
    are compared.  A missing attribute and a missing dict key both read
    as None via .get(), so they compare equal.

    Bug fix: the original iterated with zip(), which truncates at the
    shorter sequence, so an *actual* with extra or missing elements was
    reported as a successful match.  A length mismatch now returns
    False.
    """
    # Materialize both sides so len() works on plain iterables as well
    # as lxml elements, then reject length mismatches up front --
    # zip() alone would silently mask them.
    actual = list(actual)
    expected = list(expected)
    if len(actual) != len(expected):
        return False
    for elem, data in zip(actual, expected):
        for key in keys:
            if elem.get(key) != data.get(key):
                return False
    return True
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from cinder import exception as exc + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +def stub_share(id, **kwargs): + share = { + 'id': id, + 'share_proto': 'fakeproto', + 'export_location': 'fake_location', + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'host': 'fakehost', + 'size': 1, + 'availability_zone': 'fakeaz', + 'status': 'fakestatus', + 'name': 'vol name', + 'display_name': 'displayname', + 'display_description': 'displaydesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'snapshot_id': '2', + } + share.update(kwargs) + return share + + +def stub_snapshot(id, **kwargs): + snapshot = { + 'id': id, + 'share_id': 'fakeshareid', + 'share_proto': 'fakesnapproto', + 'export_location': 'fakesnaplocation', + 'user_id': 'fakesnapuser', + 'project_id': 'fakesnapproject', + 'host': 'fakesnaphost', + 'share_size': 1, + 'status': 'fakesnapstatus', + 'share_name': 'fakesharename', + 'display_name': 'displaysnapname', + 'display_description': 'displaysnapdesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + } + snapshot.update(kwargs) + return snapshot + + +def stub_share_get(self, context, share_id): + return stub_share(share_id) + + +def stub_share_get_notfound(self, context, share_id): + raise exc.NotFound + + +def stub_share_create(self, context, share_proto, size, name, description, + **param): + share = stub_share('1') + share['status'] = 'creating' + share['share_proto'] = share_proto + share['size'] = size + share['display_name'] = name + share['display_description'] = 
description + return share + + +def stub_share_delete(self, context, *args, **param): + pass + + +def stub_share_update(self, context, *args, **param): + pass + + +def stub_share_get_all_by_project(self, context, search_opts=None): + return [stub_share_get(self, context, '1')] + + +def stub_get_all_shares(self, context): + return [stub_share(100, project_id='fake'), + stub_share(101, project_id='superfake'), + stub_share(102, project_id='superduperfake')] + + +def stub_snapshot_get(self, context, snapshot_id): + return stub_snapshot(snapshot_id) + + +def stub_snapshot_get_notfound(self, context, snapshot_id): + raise exc.NotFound + + +def stub_snapshot_create(self, context, share, display_name, + display_description): + return stub_snapshot(200, + share_id=share['id'], + display_name=display_name, + display_description=display_description) + + +def stub_snapshot_delete(self, context, *args, **param): + pass + + +def stub_snapshot_get_all_by_project(self, context, search_opts=None): + return [stub_snapshot_get(self, context, 2)] diff --git a/cinder/tests/api/contrib/test_admin_actions.py b/cinder/tests/api/contrib/test_admin_actions.py new file mode 100644 index 0000000000..d6ae606f30 --- /dev/null +++ b/cinder/tests/api/contrib/test_admin_actions.py @@ -0,0 +1,348 @@ +import shutil +import tempfile +import webob + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder.volume import api as volume_api + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class AdminActionsTest(test.TestCase): + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(AdminActionsTest, self).setUp() + self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') + 
self.flags(lock_path=self.tempdir) + self.volume_api = volume_api.API() + + def tearDown(self): + shutil.rmtree(self.tempdir) + + def test_reset_status_as_admin(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # status changed to 'error' + self.assertEquals(volume['status'], 'error') + + def test_reset_status_as_non_admin(self): + # current status is 'error' + volume = db.volume_create(context.get_admin_context(), + {'status': 'error'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request changing status to available + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'available'}}) + # non-admin context + req.environ['cinder.context'] = context.RequestContext('fake', 'fake') + resp = req.get_response(app()) + # request is not authorized + self.assertEquals(resp.status_int, 403) + volume = db.volume_get(context.get_admin_context(), volume['id']) + # status is still 'error' + self.assertEquals(volume['status'], 'error') + + def test_malformed_reset_status_body(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 
malformed request body + req.body = jsonutils.dumps({'os-reset_status': {'x-status': 'bad'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEquals(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status is still 'available' + self.assertEquals(volume['status'], 'available') + + def test_invalid_status_for_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'invalid' is not a valid status + req.body = jsonutils.dumps({'os-reset_status': {'status': 'invalid'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEquals(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status is still 'available' + self.assertEquals(volume['status'], 'available') + + def test_reset_status_for_missing_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # missing-volume-id + req = webob.Request.blank('/v2/fake/volumes/%s/action' % + 'missing-volume-id') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # malformed request body + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'available'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # not found + self.assertEquals(resp.status_int, 404) + self.assertRaises(exception.NotFound, db.volume_get, ctx, + 'missing-volume-id') + + def test_reset_attached_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', + 
'attach_status': 'attached'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request update attach_status to detached + body = {'os-reset_status': {'status': 'available', + 'attach_status': 'detached'}} + req.body = jsonutils.dumps(body) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # attach_status changed to 'detached' + self.assertEquals(volume['attach_status'], 'detached') + # status un-modified + self.assertEquals(volume['status'], 'available') + + def test_invalid_reset_attached_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', + 'attach_status': 'detached'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'invalid' is not a valid attach_status + body = {'os-reset_status': {'status': 'available', + 'attach_status': 'invalid'}} + req.body = jsonutils.dumps(body) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEquals(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status and attach_status un-modified + self.assertEquals(volume['status'], 'available') + self.assertEquals(volume['attach_status'], 'detached') + + def test_snapshot_reset_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # snapshot in 'error_deleting' + volume = db.volume_create(ctx, {}) + snapshot = db.snapshot_create(ctx, {'status': 'error_deleting', + 'volume_id': volume['id']}) + req = webob.Request.blank('/v2/fake/snapshots/%s/action' % + snapshot['id']) + 
req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + snapshot = db.snapshot_get(ctx, snapshot['id']) + # status changed to 'error' + self.assertEquals(snapshot['status'], 'error') + + def test_invalid_status_for_snapshot(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # snapshot in 'available' + volume = db.volume_create(ctx, {}) + snapshot = db.snapshot_create(ctx, {'status': 'available', + 'volume_id': volume['id']}) + req = webob.Request.blank('/v2/fake/snapshots/%s/action' % + snapshot['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'attaching' is not a valid status for snapshots + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'attaching'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 400) + snapshot = db.snapshot_get(ctx, snapshot['id']) + # status is still 'available' + self.assertEquals(snapshot['status'], 'available') + + def test_force_delete(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is creating + volume = db.volume_create(ctx, {'status': 'creating'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.body = jsonutils.dumps({'os-force_delete': {}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + # volume is deleted + self.assertRaises(exception.NotFound, db.volume_get, ctx, 
volume['id']) + + def test_force_delete_snapshot(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is creating + volume = db.volume_create(ctx, {'host': 'test'}) + snapshot = db.snapshot_create(ctx, {'status': 'creating', + 'volume_size': 1, + 'volume_id': volume['id']}) + path = '/v2/fake/snapshots/%s/action' % snapshot['id'] + req = webob.Request.blank(path) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.body = jsonutils.dumps({'os-force_delete': {}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + # start service to handle rpc.cast for 'delete snapshot' + svc = self.start_service('volume', host='test') + # make request + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + # snapshot is deleted + self.assertRaises(exception.NotFound, db.snapshot_get, ctx, + snapshot['id']) + # cleanup + svc.stop() + + def test_force_detach_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': ''}) + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + self.volume_api.initialize_connection(ctx, volume, {}) + mountpoint = '/dev/vbd' + self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint) + # volume is attached + volume = db.volume_get(ctx, volume['id']) + self.assertEquals(volume['status'], 'in-use') + self.assertEquals(volume['instance_uuid'], stubs.FAKE_UUID) + self.assertEquals(volume['mountpoint'], mountpoint) + self.assertEquals(volume['attach_status'], 'attached') + # build request to force detach + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request 
status of 'error' + req.body = jsonutils.dumps({'os-force_detach': None}) + # attach admin context to request + req.environ['cinder.context'] = ctx + # make request + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # status changed to 'available' + self.assertEquals(volume['status'], 'available') + self.assertEquals(volume['instance_uuid'], None) + self.assertEquals(volume['mountpoint'], None) + self.assertEquals(volume['attach_status'], 'detached') + # cleanup + svc.stop() + + def test_attach_in_use_volume(self): + """Test that attaching to an in-use volume fails.""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': ''}) + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + self.volume_api.initialize_connection(ctx, volume, {}) + mountpoint = '/dev/vbd' + self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint) + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + fakes.get_fake_uuid(), + mountpoint) + # cleanup + svc.stop() + + def test_attach_attaching_volume_with_different_instance(self): + """Test that attaching volume reserved for another instance fails.""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': ''}) + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.initialize_connection(ctx, volume, {}) + values = {'status': 'attaching', + 'instance_uuid': fakes.get_fake_uuid()} + db.volume_update(ctx, volume['id'], values) + mountpoint = '/dev/vbd' + 
self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + stubs.FAKE_UUID, + mountpoint) + # cleanup + svc.stop() diff --git a/cinder/tests/api/contrib/test_backups.py b/cinder/tests/api/contrib/test_backups.py new file mode 100644 index 0000000000..899814a70d --- /dev/null +++ b/cinder/tests/api/contrib/test_backups.py @@ -0,0 +1,860 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for Backup code. 
+""" + +import json +from xml.dom import minidom + +import webob + +# needed for stubs to work +import cinder.backup +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes +# needed for stubs to work +import cinder.volume + + +LOG = logging.getLogger(__name__) + + +class BackupsAPITestCase(test.TestCase): + """Test Case for backups API.""" + + def setUp(self): + super(BackupsAPITestCase, self).setUp() + + def tearDown(self): + super(BackupsAPITestCase, self).tearDown() + + @staticmethod + def _create_backup(volume_id=1, + display_name='test_backup', + display_description='this is a test backup', + container='volumebackups', + status='creating', + size=0, object_count=0): + """Create a backup object.""" + backup = {} + backup['volume_id'] = volume_id + backup['user_id'] = 'fake' + backup['project_id'] = 'fake' + backup['host'] = 'testhost' + backup['availability_zone'] = 'az1' + backup['display_name'] = display_name + backup['display_description'] = display_description + backup['container'] = container + backup['status'] = status + backup['fail_reason'] = '' + backup['size'] = size + backup['object_count'] = object_count + return db.backup_create(context.get_admin_context(), backup)['id'] + + @staticmethod + def _get_backup_attrib(backup_id, attrib_name): + return db.backup_get(context.get_admin_context(), + backup_id)[attrib_name] + + @staticmethod + def _create_volume(display_name='test_volume', + display_description='this is a test volume', + status='creating', + size=1): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['status'] = status + vol['display_name'] = display_name + vol['display_description'] = display_description + vol['attach_status'] = 'detached' + return db.volume_create(context.get_admin_context(), vol)['id'] + + def 
test_show_backup(self): + volume_id = self._create_volume(size=5) + backup_id = self._create_backup(volume_id) + LOG.debug('Created backup with id %s' % backup_id) + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['backup']['availability_zone'], 'az1') + self.assertEqual(res_dict['backup']['container'], 'volumebackups') + self.assertEqual(res_dict['backup']['description'], + 'this is a test backup') + self.assertEqual(res_dict['backup']['name'], 'test_backup') + self.assertEqual(res_dict['backup']['id'], backup_id) + self.assertEqual(res_dict['backup']['object_count'], 0) + self.assertEqual(res_dict['backup']['size'], 0) + self.assertEqual(res_dict['backup']['status'], 'creating') + self.assertEqual(res_dict['backup']['volume_id'], volume_id) + + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_show_backup_xml_content_type(self): + volume_id = self._create_volume(size=5) + backup_id = self._create_backup(volume_id) + req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup = dom.getElementsByTagName('backup') + name = backup.item(0).getAttribute('name') + container_name = backup.item(0).getAttribute('container') + self.assertEquals(container_name.strip(), "volumebackups") + self.assertEquals(name.strip(), "test_backup") + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_show_backup_with_backup_NotFound(self): + req = 
webob.Request.blank('/v2/fake/backups/9999') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + def test_list_backups_json(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['backups'][0]), 3) + self.assertEqual(res_dict['backups'][0]['id'], backup_id1) + self.assertEqual(res_dict['backups'][0]['name'], 'test_backup') + self.assertEqual(len(res_dict['backups'][1]), 3) + self.assertEqual(res_dict['backups'][1]['id'], backup_id2) + self.assertEqual(res_dict['backups'][1]['name'], 'test_backup') + self.assertEqual(len(res_dict['backups'][2]), 3) + self.assertEqual(res_dict['backups'][2]['id'], backup_id3) + self.assertEqual(res_dict['backups'][2]['name'], 'test_backup') + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_xml(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup_list = dom.getElementsByTagName('backup') + + 
self.assertEqual(backup_list.item(0).attributes.length, 2) + self.assertEqual(backup_list.item(0).getAttribute('id'), + backup_id1) + self.assertEqual(backup_list.item(1).attributes.length, 2) + self.assertEqual(backup_list.item(1).getAttribute('id'), + backup_id2) + self.assertEqual(backup_list.item(2).attributes.length, 2) + self.assertEqual(backup_list.item(2).getAttribute('id'), + backup_id3) + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_detail_json(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups/detail') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['backups'][0]), 12) + self.assertEqual(res_dict['backups'][0]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][0]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][0]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][0]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][0]['id'], backup_id1) + self.assertEqual(res_dict['backups'][0]['object_count'], 0) + self.assertEqual(res_dict['backups'][0]['size'], 0) + self.assertEqual(res_dict['backups'][0]['status'], 'creating') + self.assertEqual(res_dict['backups'][0]['volume_id'], '1') + + self.assertEqual(len(res_dict['backups'][1]), 12) + self.assertEqual(res_dict['backups'][1]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][1]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][1]['description'], + 'this is a test backup') + 
self.assertEqual(res_dict['backups'][1]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][1]['id'], backup_id2) + self.assertEqual(res_dict['backups'][1]['object_count'], 0) + self.assertEqual(res_dict['backups'][1]['size'], 0) + self.assertEqual(res_dict['backups'][1]['status'], 'creating') + self.assertEqual(res_dict['backups'][1]['volume_id'], '1') + + self.assertEqual(len(res_dict['backups'][2]), 12) + self.assertEqual(res_dict['backups'][2]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][2]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][2]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][2]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][2]['id'], backup_id3) + self.assertEqual(res_dict['backups'][2]['object_count'], 0) + self.assertEqual(res_dict['backups'][2]['size'], 0) + self.assertEqual(res_dict['backups'][2]['status'], 'creating') + self.assertEqual(res_dict['backups'][2]['volume_id'], '1') + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_detail_xml(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups/detail') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup_detail = dom.getElementsByTagName('backup') + + self.assertEqual(backup_detail.item(0).attributes.length, 11) + self.assertEqual( + backup_detail.item(0).getAttribute('availability_zone'), 'az1') + self.assertEqual( + backup_detail.item(0).getAttribute('container'), 'volumebackups') + self.assertEqual( + 
backup_detail.item(0).getAttribute('description'), + 'this is a test backup') + self.assertEqual( + backup_detail.item(0).getAttribute('name'), 'test_backup') + self.assertEqual( + backup_detail.item(0).getAttribute('id'), backup_id1) + self.assertEqual( + int(backup_detail.item(0).getAttribute('object_count')), 0) + self.assertEqual( + int(backup_detail.item(0).getAttribute('size')), 0) + self.assertEqual( + backup_detail.item(0).getAttribute('status'), 'creating') + self.assertEqual( + int(backup_detail.item(0).getAttribute('volume_id')), 1) + + self.assertEqual(backup_detail.item(1).attributes.length, 11) + self.assertEqual( + backup_detail.item(1).getAttribute('availability_zone'), 'az1') + self.assertEqual( + backup_detail.item(1).getAttribute('container'), 'volumebackups') + self.assertEqual( + backup_detail.item(1).getAttribute('description'), + 'this is a test backup') + self.assertEqual( + backup_detail.item(1).getAttribute('name'), 'test_backup') + self.assertEqual( + backup_detail.item(1).getAttribute('id'), backup_id2) + self.assertEqual( + int(backup_detail.item(1).getAttribute('object_count')), 0) + self.assertEqual( + int(backup_detail.item(1).getAttribute('size')), 0) + self.assertEqual( + backup_detail.item(1).getAttribute('status'), 'creating') + self.assertEqual( + int(backup_detail.item(1).getAttribute('volume_id')), 1) + + self.assertEqual(backup_detail.item(2).attributes.length, 11) + self.assertEqual( + backup_detail.item(2).getAttribute('availability_zone'), 'az1') + self.assertEqual( + backup_detail.item(2).getAttribute('container'), 'volumebackups') + self.assertEqual( + backup_detail.item(2).getAttribute('description'), + 'this is a test backup') + self.assertEqual( + backup_detail.item(2).getAttribute('name'), 'test_backup') + self.assertEqual( + backup_detail.item(2).getAttribute('id'), backup_id3) + self.assertEqual( + int(backup_detail.item(2).getAttribute('object_count')), 0) + self.assertEqual( + 
int(backup_detail.item(2).getAttribute('size')), 0) + self.assertEqual( + backup_detail.item(2).getAttribute('status'), 'creating') + self.assertEqual( + int(backup_detail.item(2).getAttribute('volume_id')), 1) + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_create_backup_json(self): + volume_id = self._create_volume(status='available', size=5) + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": volume_id, + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + + res_dict = json.loads(res.body) + LOG.info(res_dict) + + self.assertEqual(res.status_int, 202) + self.assertTrue('id' in res_dict['backup']) + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_create_backup_xml(self): + volume_size = 2 + volume_id = self._create_volume(status='available', size=volume_size) + + req = webob.Request.blank('/v2/fake/backups') + req.body = ('' % volume_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + dom = minidom.parseString(res.body) + backup = dom.getElementsByTagName('backup') + self.assertTrue(backup.item(0).hasAttribute('id')) + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_create_backup_with_no_body(self): + # omit body from the request + req = webob.Request.blank('/v2/fake/backups') + req.body = json.dumps(None) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + 
res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + def test_create_backup_with_body_KeyError(self): + # omit volume_id from body + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Incorrect request body format') + + def test_create_backup_with_VolumeNotFound(self): + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": 9999, + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Volume 9999 could not be found.') + + def test_create_backup_with_InvalidVolume(self): + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='restoring', size=volume_size) + + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": volume_id, + "container": "nightlybackups", + } + } + req = 
webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: Volume to be backed up must' + ' be available') + + def test_delete_backup_available(self): + backup_id = self._create_backup(status='available') + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + self.assertEqual(self._get_backup_attrib(backup_id, 'status'), + 'deleting') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_delete_backup_error(self): + backup_id = self._create_backup(status='error') + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + self.assertEqual(self._get_backup_attrib(backup_id, 'status'), + 'deleting') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_delete_backup_with_backup_NotFound(self): + req = webob.Request.blank('/v2/fake/backups/9999') + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + def test_delete_backup_with_InvalidBackup(self): + backup_id = self._create_backup() + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'DELETE' + 
req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid backup: Backup status must be ' + 'available or error') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_volume_id_specified_json(self): + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + self.assertEqual(res_dict['restore']['volume_id'], volume_id) + + def test_restore_backup_volume_id_specified_xml(self): + backup_id = self._create_backup(status='available') + volume_size = 2 + volume_id = self._create_volume(status='available', size=volume_size) + + req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) + req.body = '' % volume_id + req.method = 'POST' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + dom = minidom.parseString(res.body) + restore = dom.getElementsByTagName('restore') + self.assertEqual(restore.item(0).getAttribute('backup_id'), + backup_id) + self.assertEqual(restore.item(0).getAttribute('volume_id'), volume_id) + + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) 
+ + def test_restore_backup_with_no_body(self): + # omit body from the request + backup_id = self._create_backup(status='available') + + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.body = json.dumps(None) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_body_KeyError(self): + # omit restore from body + backup_id = self._create_backup(status='available') + + req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) + body = {"": {}} + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + def test_restore_backup_volume_id_unspecified(self): + + # intercept volume creation to ensure created volume + # has status of available + def fake_volume_api_create(cls, context, size, name, description): + volume_id = self._create_volume(status='available', size=size) + return db.volume_get(context, volume_id) + + self.stubs.Set(cinder.volume.API, 'create', + fake_volume_api_create) + + backup_id = self._create_backup(size=5, status='available') + + body = {"restore": {}} + req = 
webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + + def test_restore_backup_with_InvalidInput(self): + + def fake_backup_api_restore_throwing_InvalidInput(cls, context, + backup_id, + volume_id): + msg = _("Invalid input") + raise exception.InvalidInput(reason=msg) + + self.stubs.Set(cinder.backup.API, 'restore', + fake_backup_api_restore_throwing_InvalidInput) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 0 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid input received: Invalid input') + + def test_restore_backup_with_InvalidVolume(self): + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='attaching', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + 
self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: Volume to be restored to must ' + 'be available') + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_InvalidBackup(self): + backup_id = self._create_backup(status='restoring') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid backup: Backup status must be available') + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_BackupNotFound(self): + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/9999/restore') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_restore_backup_with_VolumeNotFound(self): + backup_id = 
self._create_backup(status='available') + + body = {"restore": {"volume_id": "9999", }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Volume 9999 could not be found.') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(self): + + def fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota( + cls, context, backup_id, volume_id): + raise exception.VolumeSizeExceedsAvailableQuota() + + self.stubs.Set( + cinder.backup.API, + 'restore', + fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'Requested volume or snapshot exceeds allowed ' + 'Gigabytes quota') + + def test_restore_backup_with_VolumeLimitExceeded(self): + + def fake_backup_api_restore_throwing_VolumeLimitExceeded(cls, + context, + backup_id, + volume_id): + raise exception.VolumeLimitExceeded(allowed=1) + + self.stubs.Set(cinder.backup.API, 'restore', + fake_backup_api_restore_throwing_VolumeLimitExceeded) 
+ + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'Maximum number of volumes allowed ' + '(%(allowed)d) exceeded') + + def test_restore_backup_to_undersized_volume(self): + backup_size = 10 + backup_id = self._create_backup(status='available', size=backup_size) + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: volume size %d is too ' + 'small to restore backup of size %d.' 
+ % (volume_size, backup_size)) + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_to_oversized_volume(self): + backup_id = self._create_backup(status='available', size=10) + # need to create the volume referenced below first + volume_size = 15 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + self.assertEqual(res_dict['restore']['volume_id'], volume_id) + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) diff --git a/cinder/tests/api/contrib/test_extended_snapshot_attributes.py b/cinder/tests/api/contrib/test_extended_snapshot_attributes.py new file mode 100644 index 0000000000..5937ac9628 --- /dev/null +++ b/cinder/tests/api/contrib/test_extended_snapshot_attributes.py @@ -0,0 +1,124 @@ +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from lxml import etree +import webob + +from cinder.api.contrib import extended_snapshot_attributes +from cinder import exception +from cinder import flags +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +FLAGS = flags.FLAGS + + +UUID1 = '00000000-0000-0000-0000-000000000001' +UUID2 = '00000000-0000-0000-0000-000000000002' + + +def _get_default_snapshot_param(): + return {'id': UUID1, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'project_id': 'fake', + 'progress': '0%'} + + +def fake_snapshot_get(self, context, snapshot_id): + param = _get_default_snapshot_param() + return param + + +def fake_snapshot_get_all(self, context, search_opts=None): + param = _get_default_snapshot_param() + return [param] + + +class ExtendedSnapshotAttributesTest(test.TestCase): + content_type = 'application/json' + prefix = 'os-extended-snapshot-attributes:' + + def setUp(self): + super(ExtendedSnapshotAttributesTest, self).setUp() + self.stubs.Set(volume.api.API, 'get_snapshot', fake_snapshot_get) + self.stubs.Set(volume.api.API, 'get_all_snapshots', + fake_snapshot_get_all) + + def _make_request(self, url): + req = webob.Request.blank(url) + req.headers['Accept'] = self.content_type + res = req.get_response(fakes.wsgi_app()) + return res + + def _get_snapshot(self, body): + return jsonutils.loads(body).get('snapshot') + + def _get_snapshots(self, body): + return jsonutils.loads(body).get('snapshots') + + def assertSnapshotAttributes(self, snapshot, project_id, progress): + self.assertEqual(snapshot.get('%sproject_id' % self.prefix), + project_id) + self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress) + + def test_show(self): + url = '/v2/fake/snapshots/%s' % UUID2 + res = self._make_request(url) + + self.assertEqual(res.status_int, 200) + 
self.assertSnapshotAttributes(self._get_snapshot(res.body), + project_id='fake', + progress='0%') + + def test_detail(self): + url = '/v2/fake/snapshots/detail' + res = self._make_request(url) + + self.assertEqual(res.status_int, 200) + for i, snapshot in enumerate(self._get_snapshots(res.body)): + self.assertSnapshotAttributes(snapshot, + project_id='fake', + progress='0%') + + def test_no_instance_passthrough_404(self): + + def fake_snapshot_get(*args, **kwargs): + raise exception.InstanceNotFound(instance_id='fake') + + self.stubs.Set(volume.api.API, 'get_snapshot', fake_snapshot_get) + url = '/v2/fake/snapshots/70f6db34-de8d-4fbd-aafb-4065bdfa6115' + res = self._make_request(url) + + self.assertEqual(res.status_int, 404) + + +class ExtendedSnapshotAttributesXmlTest(ExtendedSnapshotAttributesTest): + content_type = 'application/xml' + ext = extended_snapshot_attributes + prefix = '{%s}' % ext.Extended_snapshot_attributes.namespace + + def _get_snapshot(self, body): + return etree.XML(body) + + def _get_snapshots(self, body): + return etree.XML(body).getchildren() diff --git a/cinder/tests/api/contrib/test_hosts.py b/cinder/tests/api/contrib/test_hosts.py new file mode 100644 index 0000000000..525f963051 --- /dev/null +++ b/cinder/tests/api/contrib/test_hosts.py @@ -0,0 +1,202 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime + +from lxml import etree +import webob.exc + +from cinder.api.contrib import hosts as os_hosts +from cinder import context +from cinder import db +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import test + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) +created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) +curr_time = timeutils.utcnow() + +SERVICE_LIST = [ + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}, + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}, + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}, + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}] + +LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}, + {'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}, + {'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}, + {'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}] + + +def stub_service_get_all(self, req): + return SERVICE_LIST + + +class FakeRequest(object): + environ = {'cinder.context': context.get_admin_context()} + GET = {} + + +class 
FakeRequestWithcinderZone(object): + environ = {'cinder.context': context.get_admin_context()} + GET = {'zone': 'cinder'} + + +class HostTestCase(test.TestCase): + """Test Case for hosts.""" + + def setUp(self): + super(HostTestCase, self).setUp() + self.controller = os_hosts.HostController() + self.req = FakeRequest() + self.stubs.Set(db, 'service_get_all', + stub_service_get_all) + + def _test_host_update(self, host, key, val, expected_value): + body = {key: val} + result = self.controller.update(self.req, host, body=body) + self.assertEqual(result[key], expected_value) + + def test_list_hosts(self): + """Verify that the volume hosts are returned.""" + hosts = os_hosts._list_hosts(self.req) + self.assertEqual(hosts, LIST_RESPONSE) + + cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume') + expected = [host for host in LIST_RESPONSE + if host['service'] == 'cinder-volume'] + self.assertEqual(cinder_hosts, expected) + + def test_list_hosts_with_zone(self): + req = FakeRequestWithcinderZone() + hosts = os_hosts._list_hosts(req) + self.assertEqual(hosts, LIST_RESPONSE) + + def test_bad_status_value(self): + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'test.host.1', body={'status': 'bad'}) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + self.req, + 'test.host.1', + body={'status': 'disablabc'}) + + def test_bad_update_key(self): + bad_body = {'crazy': 'bad'} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'test.host.1', body=bad_body) + + def test_bad_update_key_and_correct_udpate_key(self): + bad_body = {'status': 'disable', 'crazy': 'bad'} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'test.host.1', body=bad_body) + + def test_good_udpate_keys(self): + body = {'status': 'disable'} + self.assertRaises(NotImplementedError, self.controller.update, + self.req, 'test.host.1', body=body) + + def test_bad_host(self): + 
class HostSerializerTest(test.TestCase):
    """Tests for the XML serializers/deserializer of the os-hosts extension."""

    def setUp(self):
        super(HostSerializerTest, self).setUp()
        self.deserializer = os_hosts.HostDeserializer()

    def test_index_serializer(self):
        """The index template renders one <host> element per service row."""
        serializer = os_hosts.HostIndexTemplate()
        text = serializer.serialize({"hosts": LIST_RESPONSE})

        tree = etree.fromstring(text)

        self.assertEqual('hosts', tree.tag)
        self.assertEqual(len(LIST_RESPONSE), len(tree))
        for i in range(len(LIST_RESPONSE)):
            self.assertEqual('host', tree[i].tag)
            self.assertEqual(LIST_RESPONSE[i]['service-status'],
                             tree[i].get('service-status'))
            self.assertEqual(LIST_RESPONSE[i]['service'],
                             tree[i].get('service'))
            self.assertEqual(LIST_RESPONSE[i]['zone'],
                             tree[i].get('zone'))
            self.assertEqual(LIST_RESPONSE[i]['service-state'],
                             tree[i].get('service-state'))
            self.assertEqual(LIST_RESPONSE[i]['host_name'],
                             tree[i].get('host_name'))
            # last-update is a datetime; the template stringifies it.
            self.assertEqual(str(LIST_RESPONSE[i]['last-update']),
                             tree[i].get('last-update'))

    def test_update_serializer_with_status(self):
        exemplar = dict(host='test.host.1', status='enabled')
        serializer = os_hosts.HostUpdateTemplate()
        text = serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))

    def test_update_deserializer(self):
        exemplar = dict(status='enabled', foo='bar')
        # NOTE(review): the XML payload was garbled in the original source
        # (tags stripped, leaving "\n" + 'enabledbar'); reconstructed here
        # from the surviving text nodes and the expected result dict.
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<updates><status>enabled</status><foo>bar</foo></updates>')
        result = self.deserializer.deserialize(intext)

        self.assertEqual(dict(body=exemplar), result)
"""Tests for the os-services admin API extension."""

from datetime import datetime

from cinder.api.contrib import services
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import timeutils
from cinder import policy
from cinder import test
from cinder.tests.api import fakes


# Canned db.service_get_all() rows; ids are referenced by the stubs below.
fake_services_list = [{'binary': 'cinder-scheduler',
                       'host': 'host1',
                       'availability_zone': 'cinder',
                       'id': 1,
                       'disabled': True,
                       'updated_at': datetime(2012, 10, 29, 13, 42, 2),
                       'created_at': datetime(2012, 9, 18, 2, 46, 27)},
                      {'binary': 'cinder-volume',
                       'host': 'host1',
                       'availability_zone': 'cinder',
                       'id': 2,
                       'disabled': True,
                       'updated_at': datetime(2012, 10, 29, 13, 42, 5),
                       'created_at': datetime(2012, 9, 18, 2, 46, 27)},
                      {'binary': 'cinder-scheduler',
                       'host': 'host2',
                       'availability_zone': 'cinder',
                       'id': 3,
                       'disabled': False,
                       'updated_at': datetime(2012, 9, 19, 6, 55, 34),
                       'created_at': datetime(2012, 9, 18, 2, 46, 28)},
                      {'binary': 'cinder-volume',
                       'host': 'host2',
                       'availability_zone': 'cinder',
                       'id': 4,
                       'disabled': True,
                       'updated_at': datetime(2012, 9, 18, 8, 3, 38),
                       'created_at': datetime(2012, 9, 18, 2, 46, 28)},
                      ]


class FakeRequest(object):
    environ = {"cinder.context": context.get_admin_context()}
    GET = {}


class FakeRequestWithService(object):
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"service": "cinder-volume"}


# Backward-compatible alias for the original misspelled class name.
FakeRequestWithSevice = FakeRequestWithService


class FakeRequestWithHost(object):
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1"}


class FakeRequestWithHostService(object):
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1", "service": "cinder-volume"}


def fake_service_get_all(context):
    """Stub for db.service_get_all returning the canned rows."""
    return fake_services_list


# Backward-compatible alias for the original misspelled function name.
fake_servcie_get_all = fake_service_get_all


def fake_service_get_by_host_binary(context, host, binary):
    """Stub for db.service_get_by_args; None when no row matches."""
    for service in fake_services_list:
        if service['host'] == host and service['binary'] == binary:
            return service
    return None


def fake_service_get_by_id(value):
    """Look up a canned service row by primary key; None if absent."""
    for service in fake_services_list:
        if service['id'] == value:
            return service
    return None


def fake_service_update(context, service_id, values):
    """Stub for db.service_update.

    Raises ServiceNotFound for an unknown id.  The original built the
    result dict and discarded it; return it so the stub mimics the real
    db API, which returns the updated service ref.
    """
    service = fake_service_get_by_id(service_id)
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
    return {'host': 'host1', 'service': 'cinder-volume',
            'disabled': values['disabled']}


def fake_policy_enforce(context, action, target):
    """Stub for policy.enforce that allows every action."""
    pass


def fake_utcnow():
    # Fixed "now" so the up/down state computed from updated_at is stable.
    return datetime(2012, 10, 29, 13, 42, 11)


class ServicesTest(test.TestCase):
    """Tests for the ServiceController index/update handlers."""

    def setUp(self):
        super(ServicesTest, self).setUp()

        self.stubs.Set(db, "service_get_all", fake_service_get_all)
        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(db, "service_get_by_args",
                       fake_service_get_by_host_binary)
        self.stubs.Set(db, "service_update", fake_service_update)
        self.stubs.Set(policy, "enforce", fake_policy_enforce)

        self.context = context.get_admin_context()
        self.controller = services.ServiceController()

    def tearDown(self):
        super(ServicesTest, self).tearDown()

    def test_services_list(self):
        req = FakeRequest()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'cinder-scheduler',
                                  'host': 'host1', 'zone': 'cinder',
                                  'status': 'disabled', 'state': 'up',
                                  'updated_at': datetime(2012, 10, 29,
                                                         13, 42, 2)},
                                 {'binary': 'cinder-volume',
                                  'host': 'host1', 'zone': 'cinder',
                                  'status': 'disabled', 'state': 'up',
                                  'updated_at': datetime(2012, 10, 29,
                                                         13, 42, 5)},
                                 {'binary': 'cinder-scheduler',
                                  'host': 'host2', 'zone': 'cinder',
                                  'status': 'enabled', 'state': 'up',
                                  'updated_at': datetime(2012, 9, 19,
                                                         6, 55, 34)},
                                 {'binary': 'cinder-volume',
                                  'host': 'host2', 'zone': 'cinder',
                                  'status': 'disabled', 'state': 'up',
                                  'updated_at': datetime(2012, 9, 18,
                                                         8, 3, 38)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_host(self):
        req = FakeRequestWithHost()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'cinder-scheduler',
                                  'host': 'host1',
                                  'zone': 'cinder',
                                  'status': 'disabled', 'state': 'up',
                                  'updated_at': datetime(2012, 10,
                                                         29, 13, 42, 2)},
                                 {'binary': 'cinder-volume', 'host': 'host1',
                                  'zone': 'cinder',
                                  'status': 'disabled', 'state': 'up',
                                  'updated_at': datetime(2012, 10, 29,
                                                         13, 42, 5)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_service(self):
        req = FakeRequestWithService()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'cinder-volume',
                                  'host': 'host1',
                                  'zone': 'cinder',
                                  'status': 'disabled',
                                  'state': 'up',
                                  'updated_at': datetime(2012, 10, 29,
                                                         13, 42, 5)},
                                 {'binary': 'cinder-volume',
                                  'host': 'host2',
                                  'zone': 'cinder',
                                  'status': 'disabled',
                                  'state': 'up',
                                  'updated_at': datetime(2012, 9, 18,
                                                         8, 3, 38)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_host_service(self):
        req = FakeRequestWithHostService()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'cinder-volume',
                                  'host': 'host1',
                                  'zone': 'cinder',
                                  'status': 'disabled',
                                  'state': 'up',
                                  'updated_at': datetime(2012, 10, 29,
                                                         13, 42, 5)}]}
        self.assertEqual(res_dict, response)

    def test_services_enable(self):
        body = {'host': 'host1', 'service': 'cinder-volume'}
        req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable')
        res_dict = self.controller.update(req, "enable", body)

        self.assertEqual(res_dict['disabled'], False)

    def test_services_disable(self):
        req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable')
        body = {'host': 'host1', 'service': 'cinder-volume'}
        res_dict = self.controller.update(req, "disable", body)

        self.assertEqual(res_dict['disabled'], True)
"""Tests for the share actions API extension (allow/deny/list access)."""

import datetime
import uuid
import webob

from cinder.api.contrib import share_actions
from cinder import exception
from cinder import flags
from cinder.openstack.common import jsonutils
from cinder.openstack.common.rpc import common as rpc_common
from cinder import share
from cinder.share import api as share_api
from cinder import test
from cinder.tests.api.contrib import stubs
from cinder.tests.api import fakes


FLAGS = flags.FLAGS


def _fake_access_get(self, ctxt, access_id):
    """Stub for share_api.API.access_get returning a minimal access object."""

    class Access(object):
        def __init__(self, **kwargs):
            self.STATE_NEW = 'fake_new'
            self.STATE_ACTIVE = 'fake_active'
            self.STATE_ERROR = 'fake_error'
            self.params = kwargs
            self.params['state'] = self.STATE_NEW
            self.share_id = kwargs.get('share_id')
            self.id = access_id

        def __getitem__(self, item):
            return self.params[item]

    return Access(access_id=access_id, share_id='fake_share_id')


class ShareActionsTest(test.TestCase):
    """Exercises ShareActionsController access-management actions."""

    def setUp(self):
        super(ShareActionsTest, self).setUp()
        self.controller = share_actions.ShareActionsController()
        self.stubs.Set(share_api.API, 'get', stubs.stub_share_get)

    def test_allow_access(self):
        def _stub_allow_access(*args, **kwargs):
            pass

        self.stubs.Set(share_api.API, "allow_access", _stub_allow_access)

        share_id = 'fake_share_id'
        body = {"os-allow_access": {"access_type": 'fakeip',
                                    "access_to": '127.0.0.1'}}
        req = fakes.HTTPRequest.blank(
            '/v1/tenant1/shares/%s/action' % share_id)
        res = self.controller._allow_access(req, share_id, body)
        self.assertEqual(res.status_int, 202)

    def test_deny_access(self):
        def _stub_deny_access(*args, **kwargs):
            pass

        self.stubs.Set(share_api.API, "deny_access", _stub_deny_access)
        self.stubs.Set(share_api.API, "access_get", _fake_access_get)

        share_id = 'fake_share_id'
        body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
        req = fakes.HTTPRequest.blank(
            '/v1/tenant1/shares/%s/action' % share_id)
        res = self.controller._deny_access(req, share_id, body)
        self.assertEqual(res.status_int, 202)

    def test_deny_access_not_found(self):
        def _stub_deny_access(*args, **kwargs):
            pass

        self.stubs.Set(share_api.API, "deny_access", _stub_deny_access)
        self.stubs.Set(share_api.API, "access_get", _fake_access_get)

        # The stubbed access record belongs to 'fake_share_id', so a deny
        # against a different share id must 404.
        share_id = 'super_fake_share_id'
        body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
        req = fakes.HTTPRequest.blank(
            '/v1/tenant1/shares/%s/action' % share_id)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._deny_access,
                          req,
                          share_id,
                          body)

    def test_access_list(self):
        def _fake_access_get_all(*args, **kwargs):
            return [{"state": "fakestatus",
                     "id": "fake_share_id",
                     "access_type": "fakeip",
                     "access_to": "127.0.0.1"}]

        self.stubs.Set(share_api.API, "access_get_all", _fake_access_get_all)

        share_id = 'fake_share_id'
        body = {"os-access_list": None}
        req = fakes.HTTPRequest.blank(
            '/v1/tenant1/shares/%s/action' % share_id)
        res_dict = self.controller._access_list(req, share_id, body)
        self.assertEqual(res_dict['access_list'], _fake_access_get_all())
"""Tests for the share-snapshots API extension."""

import datetime

import webob

from cinder.api.contrib import share_snapshots
from cinder import exception
from cinder.share import api as share_api
from cinder import test
from cinder.tests.api.contrib import stubs
from cinder.tests.api import fakes


class ShareSnapshotApiTest(test.TestCase):
    """Share Snapshot Api Test."""

    def setUp(self):
        super(ShareSnapshotApiTest, self).setUp()
        self.controller = share_snapshots.ShareSnapshotsController()

        self.stubs.Set(share_api.API, 'get', stubs.stub_share_get)
        self.stubs.Set(share_api.API, 'get_all_snapshots',
                       stubs.stub_snapshot_get_all_by_project)
        self.stubs.Set(share_api.API, 'get_snapshot',
                       stubs.stub_snapshot_get)

        # Show full dict diffs on assertion failure.
        self.maxDiff = None

    def test_snapshot_create(self):
        self.stubs.Set(share_api.API, 'create_snapshot',
                       stubs.stub_snapshot_create)
        body = {
            'share-snapshot': {
                'share_id': 100,
                'force': False,
                'name': 'fake_share_name',
                'description': 'fake_share_description',
            }
        }
        req = fakes.HTTPRequest.blank('/share-snapshots')
        result = self.controller.create(req, body)
        expected = {
            'share-snapshot': {
                'id': 200,
                'name': 'fake_share_name',
                'links': [
                    {
                        'href': 'http://localhost/v1/fake/share-snapshots/200',
                        'rel': 'self'
                    },
                    {
                        'href': 'http://localhost/fake/share-snapshots/200',
                        'rel': 'bookmark'
                    }
                ],
            }
        }
        self.assertEqual(result, expected)

    def test_snapshot_create_no_body(self):
        req = fakes.HTTPRequest.blank('/share-snapshots')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create,
                          req,
                          {})

    def test_snapshot_delete(self):
        self.stubs.Set(share_api.API, 'delete_snapshot',
                       stubs.stub_snapshot_delete)
        req = fakes.HTTPRequest.blank('/share-snapshots/200')
        resp = self.controller.delete(req, 200)
        self.assertEqual(resp.status_int, 202)

    def test_snapshot_delete_nofound(self):
        self.stubs.Set(share_api.API, 'get_snapshot',
                       stubs.stub_snapshot_get_notfound)
        req = fakes.HTTPRequest.blank('/share-snapshots/200')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete,
                          req,
                          200)

    def test_snapshot_show(self):
        req = fakes.HTTPRequest.blank('/share-snapshots/200')
        result = self.controller.show(req, 200)
        expected = {
            'share-snapshot': {
                'id': 200,
                'share_id': 'fakeshareid',
                'share_size': 1,
                'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                'status': 'fakesnapstatus',
                'name': 'displaysnapname',
                'description': 'displaysnapdesc',
                'share_proto': 'fakesnapproto',
                'export_location': 'fakesnaplocation',
                'links': [
                    {
                        'href': 'http://localhost/v1/fake/share-snapshots/200',
                        'rel': 'self',
                    },
                    {
                        'href': 'http://localhost/fake/share-snapshots/200',
                        'rel': 'bookmark',
                    },
                ],
            }
        }
        self.assertEqual(result, expected)

    def test_snapshot_show_nofound(self):
        self.stubs.Set(share_api.API, 'get_snapshot',
                       stubs.stub_snapshot_get_notfound)
        req = fakes.HTTPRequest.blank('/share-snapshots/200')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show,
                          req, '200')

    def test_snapshot_list_summary(self):
        self.stubs.Set(share_api.API, 'get_all_snapshots',
                       stubs.stub_snapshot_get_all_by_project)
        req = fakes.HTTPRequest.blank('/share-snapshots')
        result = self.controller.index(req)
        expected = {
            'share-snapshots': [
                {
                    'name': 'displaysnapname',
                    'id': 2,
                    'links': [
                        {
                            'href': 'http://localhost/v1/fake/'
                                    'share-snapshots/2',
                            'rel': 'self'
                        },
                        {
                            'href': 'http://localhost/fake/share-snapshots/2',
                            'rel': 'bookmark'
                        }
                    ],
                }
            ]
        }
        self.assertEqual(result, expected)

    def test_snapshot_list_detail(self):
        env = {'QUERY_STRING': 'name=Share+Test+Name'}
        req = fakes.HTTPRequest.blank('/shares/detail', environ=env)
        result = self.controller.detail(req)
        expected = {
            'share-snapshots': [
                {
                    'id': 2,
                    'share_id': 'fakeshareid',
                    'share_size': 1,
                    'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                    'status': 'fakesnapstatus',
                    'name': 'displaysnapname',
                    'description': 'displaysnapdesc',
                    'share_proto': 'fakesnapproto',
                    'export_location': 'fakesnaplocation',
                    'links': [
                        {
                            'href': 'http://localhost/v1/fake/'
                                    'share-snapshots/2',
                            'rel': 'self',
                        },
                        {
                            'href': 'http://localhost/fake/share-snapshots/2',
                            'rel': 'bookmark',
                        },
                    ],
                },
            ]
        }
        self.assertEqual(result, expected)
"""Tests for the shares API extension."""

import datetime

import webob

from cinder.api.contrib import shares
from cinder import context
from cinder import exception
from cinder.share import api as share_api
from cinder import test
from cinder.tests.api.contrib import stubs
from cinder.tests.api import fakes


class ShareApiTest(test.TestCase):
    """Share Api Test."""

    def setUp(self):
        super(ShareApiTest, self).setUp()
        self.controller = shares.ShareController()

        self.stubs.Set(share_api.API, 'get_all',
                       stubs.stub_get_all_shares)
        self.stubs.Set(share_api.API, 'get',
                       stubs.stub_share_get)
        self.stubs.Set(share_api.API, 'delete', stubs.stub_share_delete)
        self.stubs.Set(share_api.API, 'get_snapshot', stubs.stub_snapshot_get)
        # Show full dict diffs on assertion failure.
        self.maxDiff = None

    def test_share_create(self):
        self.stubs.Set(share_api.API, 'create', stubs.stub_share_create)
        shr = {
            "size": 100,
            "name": "Share Test Name",
            "description": "Share Test Desc",
            "share_proto": "fakeproto",
            "availability_zone": "zone1:host1"
        }
        body = {"share": shr}
        req = fakes.HTTPRequest.blank('/shares')
        res_dict = self.controller.create(req, body)
        expected = {
            'share': {
                'name': 'Share Test Name',
                'id': '1',
                'links': [
                    {
                        'href': 'http://localhost/v1/fake/shares/1',
                        'rel': 'self'
                    },
                    {
                        'href': 'http://localhost/fake/shares/1',
                        'rel': 'bookmark'
                    }
                ],
            }
        }
        self.assertEqual(res_dict, expected)

    def test_share_create_from_snapshot(self):
        self.stubs.Set(share_api.API, 'create', stubs.stub_share_create)
        shr = {
            "size": 100,
            "name": "Share Test Name",
            "description": "Share Test Desc",
            "share_proto": "fakeproto",
            "availability_zone": "zone1:host1",
            "snapshot_id": 333,
        }
        body = {"share": shr}
        req = fakes.HTTPRequest.blank('/shares')
        res_dict = self.controller.create(req, body)
        expected = {
            'share': {
                'name': 'Share Test Name',
                'id': '1',
                'links': [
                    {
                        'href': 'http://localhost/v1/fake/shares/1',
                        'rel': 'self'
                    },
                    {
                        'href': 'http://localhost/fake/shares/1',
                        'rel': 'bookmark'
                    }
                ],
            }
        }
        self.assertEqual(res_dict, expected)

    def test_share_creation_fails_with_bad_size(self):
        shr = {"size": '',
               "name": "Share Test Name",
               "description": "Share Test Desc",
               "share_proto": "fakeproto",
               "availability_zone": "zone1:host1"}
        body = {"share": shr}
        req = fakes.HTTPRequest.blank('/shares')
        self.assertRaises(exception.InvalidInput,
                          self.controller.create,
                          req,
                          body)

    def test_share_create_no_body(self):
        body = {}
        req = fakes.HTTPRequest.blank('/shares')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create,
                          req,
                          body)

    def test_share_show(self):
        req = fakes.HTTPRequest.blank('/shares/1')
        res_dict = self.controller.show(req, '1')
        # NOTE(review): removed a leftover debug "print res_dict" here.
        expected = {
            'share': {'name': 'displayname',
                      'availability_zone': 'fakeaz',
                      'description': 'displaydesc',
                      'export_location': 'fake_location',
                      'id': '1',
                      'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'share_proto': 'fakeproto',
                      'size': 1,
                      'snapshot_id': '2',
                      'status': 'fakestatus',
                      'links': [{'href': 'http://localhost/v1/fake/shares/1',
                                 'rel': 'self'},
                                {'href': 'http://localhost/fake/shares/1',
                                 'rel': 'bookmark'}]
                      }
        }
        self.assertEqual(res_dict, expected)

    def test_share_show_no_share(self):
        self.stubs.Set(share_api.API, 'get',
                       stubs.stub_share_get_notfound)
        req = fakes.HTTPRequest.blank('/shares/1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show,
                          req, '1')

    def test_share_delete(self):
        req = fakes.HTTPRequest.blank('/shares/1')
        resp = self.controller.delete(req, 1)
        self.assertEqual(resp.status_int, 202)

    def test_share_delete_no_share(self):
        self.stubs.Set(share_api.API, 'get',
                       stubs.stub_share_get_notfound)
        req = fakes.HTTPRequest.blank('/shares/1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete,
                          req,
                          1)

    def test_share_list_summary(self):
        self.stubs.Set(share_api.API, 'get_all',
                       stubs.stub_share_get_all_by_project)
        req = fakes.HTTPRequest.blank('/shares')
        res_dict = self.controller.index(req)
        expected = {
            'shares': [
                {
                    'name': 'displayname',
                    'id': '1',
                    'links': [
                        {
                            'href': 'http://localhost/v1/fake/shares/1',
                            'rel': 'self'
                        },
                        {
                            'href': 'http://localhost/fake/shares/1',
                            'rel': 'bookmark'
                        }
                    ],
                }
            ]
        }
        self.assertEqual(res_dict, expected)

    def test_share_list_detail(self):
        self.stubs.Set(share_api.API, 'get_all',
                       stubs.stub_share_get_all_by_project)
        env = {'QUERY_STRING': 'name=Share+Test+Name'}
        req = fakes.HTTPRequest.blank('/shares/detail', environ=env)
        res_dict = self.controller.detail(req)
        expected = {
            'shares': [
                {
                    'status': 'fakestatus',
                    'description': 'displaydesc',
                    'export_location': 'fake_location',
                    'availability_zone': 'fakeaz',
                    'name': 'displayname',
                    'share_proto': 'fakeproto',
                    'id': '1',
                    'snapshot_id': '2',
                    'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                    'size': 1,
                    'links': [
                        {
                            'href': 'http://localhost/v1/fake/shares/1',
                            'rel': 'self'
                        },
                        {
                            'href': 'http://localhost/fake/shares/1',
                            'rel': 'bookmark'
                        }
                    ],
                }
            ]
        }
        self.assertEqual(res_dict, expected)

    def test_remove_invalid_options(self):
        ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False)
        search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
        expected_opts = {'a': 'a', 'c': 'c'}
        allowed_opts = ['a', 'c']
        # No mocks were recorded; ReplayAll is kept for mox bookkeeping only.
        self.mox.ReplayAll()
        shares.remove_invalid_options(ctx, search_opts, allowed_opts)
        self.assertEqual(search_opts, expected_opts)

    def test_remove_invalid_options_admin(self):
        # Admin contexts keep every search option, even disallowed ones.
        ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True)
        search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
        expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
        allowed_opts = ['a', 'c']
        self.mox.ReplayAll()
        shares.remove_invalid_options(ctx, search_opts, allowed_opts)
        self.assertEqual(search_opts, expected_opts)
"""Tests for the volume-type extra-specs API extension."""

from lxml import etree
import webob

from cinder.api.contrib import types_extra_specs
from cinder.openstack.common.notifier import api as notifier_api
from cinder.openstack.common.notifier import test_notifier
from cinder import test
from cinder.tests.api import fakes
import cinder.wsgi


def return_create_volume_type_extra_specs(context, volume_type_id,
                                          extra_specs):
    return stub_volume_type_extra_specs()


def return_volume_type_extra_specs(context, volume_type_id):
    return stub_volume_type_extra_specs()


def return_empty_volume_type_extra_specs(context, volume_type_id):
    return {}


def delete_volume_type_extra_specs(context, volume_type_id, key):
    pass


def stub_volume_type_extra_specs():
    """Canned extra-specs dict shared by the stubs above."""
    specs = {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
        "key4": "value4",
        "key5": "value5"}
    return specs


def volume_type_get(context, volume_type_id):
    pass


class VolumeTypesExtraSpecsTest(test.TestCase):

    def setUp(self):
        super(VolumeTypesExtraSpecsTest, self).setUp()
        self.flags(connection_type='fake',
                   host='fake',
                   notification_driver=[test_notifier.__name__])
        self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get)
        self.api_path = '/v2/fake/os-volume-types/1/extra_specs'
        self.controller = types_extra_specs.VolumeTypeExtraSpecsController()
        # Reset notifier drivers left over from other api/contrib tests.
        # (The original used a bare triple-quoted string here, which is a
        # no-op expression statement, not a comment.)
        notifier_api._reset_drivers()
        test_notifier.NOTIFICATIONS = []

    def tearDown(self):
        notifier_api._reset_drivers()
        super(VolumeTypesExtraSpecsTest, self).tearDown()

    def test_index(self):
        self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
                       return_volume_type_extra_specs)

        req = fakes.HTTPRequest.blank(self.api_path)
        res_dict = self.controller.index(req, 1)

        self.assertEqual('value1', res_dict['extra_specs']['key1'])

    def test_index_no_data(self):
        self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
                       return_empty_volume_type_extra_specs)

        req = fakes.HTTPRequest.blank(self.api_path)
        res_dict = self.controller.index(req, 1)

        self.assertEqual(0, len(res_dict['extra_specs']))

    def test_show(self):
        self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
                       return_volume_type_extra_specs)

        req = fakes.HTTPRequest.blank(self.api_path + '/key5')
        res_dict = self.controller.show(req, 1, 'key5')

        self.assertEqual('value5', res_dict['key5'])

    def test_show_spec_not_found(self):
        self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
                       return_empty_volume_type_extra_specs)

        req = fakes.HTTPRequest.blank(self.api_path + '/key6')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, 1, 'key6')

    def test_delete(self):
        self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete',
                       delete_volume_type_extra_specs)

        # assertEquals is a deprecated alias; use assertEqual consistently.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank(self.api_path + '/key5')
        self.controller.delete(req, 1, 'key5')
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)

    def test_create(self):
        self.stubs.Set(cinder.db,
                       'volume_type_extra_specs_update_or_create',
                       return_create_volume_type_extra_specs)
        body = {"extra_specs": {"key1": "value1"}}

        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank(self.api_path)
        res_dict = self.controller.create(req, 1, body)
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)

        self.assertEqual('value1', res_dict['extra_specs']['key1'])

    def test_update_item(self):
        self.stubs.Set(cinder.db,
                       'volume_type_extra_specs_update_or_create',
                       return_create_volume_type_extra_specs)
        body = {"key1": "value1"}

        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank(self.api_path + '/key1')
        res_dict = self.controller.update(req, 1, 'key1', body)
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)

        self.assertEqual('value1', res_dict['key1'])

    def test_update_item_too_many_keys(self):
        self.stubs.Set(cinder.db,
                       'volume_type_extra_specs_update_or_create',
                       return_create_volume_type_extra_specs)
        body = {"key1": "value1", "key2": "value2"}

        req = fakes.HTTPRequest.blank(self.api_path + '/key1')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 1, 'key1', body)

    def test_update_item_body_uri_mismatch(self):
        self.stubs.Set(cinder.db,
                       'volume_type_extra_specs_update_or_create',
                       return_create_volume_type_extra_specs)
        body = {"key1": "value1"}

        req = fakes.HTTPRequest.blank(self.api_path + '/bad')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 1, 'bad', body)

    def _extra_specs_empty_update(self, body):
        req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '1', body)

    def test_update_no_body(self):
        self._extra_specs_empty_update(body=None)

    def test_update_empty_body(self):
        self._extra_specs_empty_update(body={})

    def _extra_specs_create_bad_body(self, body):
        req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
        req.method = 'POST'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, '1', body)

    def test_create_no_body(self):
        self._extra_specs_create_bad_body(body=None)

    def test_create_missing_volume(self):
        body = {'foo': {'a': 'b'}}
        self._extra_specs_create_bad_body(body=body)

    def test_create_malformed_entity(self):
        body = {'extra_specs': 'string'}
        self._extra_specs_create_bad_body(body=body)


class VolumeTypeExtraSpecsSerializerTest(test.TestCase):

    def test_index_create_serializer(self):
        serializer = types_extra_specs.VolumeTypeExtraSpecsTemplate()

        # Just getting some input data.
        extra_specs = stub_volume_type_extra_specs()
        text = serializer.serialize(dict(extra_specs=extra_specs))

        # NOTE(review): removed leftover debug "print text" statements.
        tree = etree.fromstring(text)

        self.assertEqual('extra_specs', tree.tag)
        self.assertEqual(len(extra_specs), len(tree))
        seen = set(extra_specs.keys())
        for child in tree:
            self.assertTrue(child.tag in seen)
            self.assertEqual(extra_specs[child.tag], child.text)
            seen.remove(child.tag)
        self.assertEqual(len(seen), 0)

    def test_update_show_serializer(self):
        serializer = types_extra_specs.VolumeTypeExtraSpecTemplate()

        exemplar = dict(key1='value1')
        text = serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('key1', tree.tag)
        self.assertEqual('value1', tree.text)
        self.assertEqual(0, len(tree))
"""Tests for the volume-types manage (create/delete) API extension."""

import webob

from cinder.api.contrib import types_manage
from cinder import exception
from cinder.openstack.common.notifier import api as notifier_api
from cinder.openstack.common.notifier import test_notifier
from cinder import test
from cinder.tests.api import fakes
from cinder.volume import volume_types


def stub_volume_type(type_id):
    """Build a canned volume-type dict for the given numeric id.

    The parameter was renamed from ``id`` to avoid shadowing the builtin.
    """
    specs = {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
        "key4": "value4",
        "key5": "value5"}
    return dict(id=type_id,
                name='vol_type_%s' % str(type_id),
                extra_specs=specs)


def return_volume_types_get_volume_type(context, id):
    # "777" is the magic id used by the not-found tests.
    if id == "777":
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return stub_volume_type(int(id))


def return_volume_types_destroy(context, name):
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)


def return_volume_types_create(context, name, specs):
    pass


def return_volume_types_get_by_name(context, name):
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    # Names look like 'vol_type_<id>'; recover the id from the suffix.
    return stub_volume_type(int(name.split("_")[2]))


class VolumeTypesManageApiTest(test.TestCase):

    def setUp(self):
        super(VolumeTypesManageApiTest, self).setUp()
        self.flags(connection_type='fake',
                   host='fake',
                   notification_driver=[test_notifier.__name__])
        self.controller = types_manage.VolumeTypesManageController()
        # Reset notifier drivers left over from other api/contrib tests.
        notifier_api._reset_drivers()
        test_notifier.NOTIFICATIONS = []

    def tearDown(self):
        notifier_api._reset_drivers()
        super(VolumeTypesManageApiTest, self).tearDown()

    def test_volume_types_delete(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        self.stubs.Set(volume_types, 'destroy',
                       return_volume_types_destroy)

        req = fakes.HTTPRequest.blank('/v2/fake/types/1')
        # assertEquals is a deprecated alias; use assertEqual consistently.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        self.controller._delete(req, 1)
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)

    def test_volume_types_delete_not_found(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        self.stubs.Set(volume_types, 'destroy',
                       return_volume_types_destroy)

        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank('/v2/fake/types/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete,
                          req, '777')
        # The error path still emits a notification.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)

    def test_create(self):
        self.stubs.Set(volume_types, 'create',
                       return_volume_types_create)
        self.stubs.Set(volume_types, 'get_volume_type_by_name',
                       return_volume_types_get_by_name)

        body = {"volume_type": {"name": "vol_type_1",
                                "extra_specs": {"key1": "value1"}}}
        req = fakes.HTTPRequest.blank('/v2/fake/types')

        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        res_dict = self.controller._create(req, body)

        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
        self.assertEqual(1, len(res_dict))
        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])

    def _create_volume_type_bad_body(self, body):
        req = fakes.HTTPRequest.blank('/v2/fake/types')
        req.method = 'POST'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._create, req, body)

    def test_create_no_body(self):
        self._create_volume_type_bad_body(body=None)

    def test_create_missing_volume(self):
        body = {'foo': {'a': 'b'}}
        self._create_volume_type_bad_body(body=body)

    def test_create_malformed_entity(self):
        body = {'volume_type': 'string'}
        self._create_volume_type_bad_body(body=body)
import datetime
import uuid
import webob

from cinder.api.contrib import volume_actions
from cinder import exception
from cinder import flags
from cinder.openstack.common import jsonutils
from cinder.openstack.common.rpc import common as rpc_common
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder import volume
from cinder.volume import api as volume_api


FLAGS = flags.FLAGS


def fake_volume_api(*args, **kwargs):
    """Accept any volume.API call and report success."""
    return True


def fake_volume_get(*args, **kwargs):
    """Return a minimal volume dict for lookups."""
    return {'id': 'fake', 'host': 'fake'}


class VolumeActionsTest(test.TestCase):
    """Tests for the basic volume action endpoints."""

    _actions = ('os-detach', 'os-reserve', 'os-unreserve')

    _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume')

    def setUp(self):
        super(VolumeActionsTest, self).setUp()
        self.UUID = uuid.uuid4()
        for _method in self._methods:
            self.stubs.Set(volume.API, _method, fake_volume_api)
        # NOTE(review): volume.API.get was stubbed twice in the original
        # setUp; only this final stub ever took effect, so the redundant
        # fake_volume_api stub of 'get' was removed.
        self.stubs.Set(volume.API, 'get', fake_volume_get)

    def test_simple_api_actions(self):
        app = fakes.wsgi_app()
        for _action in self._actions:
            req = webob.Request.blank('/v2/fake/volumes/%s/action' %
                                      self.UUID)
            req.method = 'POST'
            req.body = jsonutils.dumps({_action: None})
            req.content_type = 'application/json'
            res = req.get_response(app)
            self.assertEqual(res.status_int, 202)

    def test_initialize_connection(self):
        def fake_initialize_connection(*args, **kwargs):
            return {}
        self.stubs.Set(volume.API, 'initialize_connection',
                       fake_initialize_connection)

        body = {'os-initialize_connection': {'connector': 'fake'}}
        req = webob.Request.blank('/v2/fake/volumes/1/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)

    def test_terminate_connection(self):
        def fake_terminate_connection(*args, **kwargs):
            return {}
        self.stubs.Set(volume.API, 'terminate_connection',
                       fake_terminate_connection)

        body = {'os-terminate_connection': {'connector': 'fake'}}
        req = webob.Request.blank('/v2/fake/volumes/1/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)

    def test_attach(self):
        body = {'os-attach': {'instance_uuid': 'fake',
                              'mountpoint': '/dev/vdc'}}
        req = webob.Request.blank('/v2/fake/volumes/1/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)


def stub_volume_get(self, context, volume_id):
    """Volume lookup stub: id 5 is 'in-use', everything else 'available'."""
    volume = stubs.stub_volume(volume_id)
    if volume_id == 5:
        volume['status'] = 'in-use'
    else:
        volume['status'] = 'available'
    return volume


def stub_upload_volume_to_image_service(self, context, volume, metadata,
                                        force):
    """Pretend to upload *volume* to glance; echo an 'uploading' record."""
    ret = {"id": volume['id'],
           "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
           "status": 'uploading',
           "display_description": volume['display_description'],
           "size": volume['size'],
           "volume_type": volume['volume_type'],
           "image_id": 1,
           "container_format": 'bare',
           "disk_format": 'raw',
           "image_name": 'image_name'}
    return ret
class VolumeImageActionsTest(test.TestCase):
    """Tests for the os-volume_upload_image action."""

    def setUp(self):
        super(VolumeImageActionsTest, self).setUp()
        self.controller = volume_actions.VolumeActionsController()

        self.stubs.Set(volume_api.API, 'get', stub_volume_get)

    def _upload_image_request(self, volume_id=1):
        """Build the (req, body) pair shared by every upload test."""
        vol = {"container_format": 'bare',
               "disk_format": 'raw',
               "image_name": 'image_name',
               "force": True}
        body = {"os-volume_upload_image": vol}
        req = fakes.HTTPRequest.blank(
            '/v2/tenant1/volumes/%s/action' % volume_id)
        return req, body

    def test_copy_volume_to_image(self):
        self.stubs.Set(volume_api.API,
                       "copy_volume_to_image",
                       stub_upload_volume_to_image_service)

        req, body = self._upload_image_request()
        res_dict = self.controller._volume_upload_image(req, 1, body)
        expected = {'os-volume_upload_image': {
            'id': 1,
            'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'status': 'uploading',
            'display_description': 'displaydesc',
            'size': 1,
            'volume_type': {'name': 'vol_type_name'},
            'image_id': 1,
            'container_format': 'bare',
            'disk_format': 'raw',
            'image_name': 'image_name'}}
        self.assertDictMatch(res_dict, expected)

    def test_copy_volume_to_image_volumenotfound(self):
        def stub_volume_get_raise_exc(self, context, volume_id):
            raise exception.VolumeNotFound(volume_id=volume_id)

        self.stubs.Set(volume_api.API, 'get', stub_volume_get_raise_exc)

        req, body = self._upload_image_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._volume_upload_image,
                          req, 1, body)

    def _assert_upload_raises_bad_request(self, raiser):
        """Stub copy_volume_to_image with *raiser* and expect HTTP 400."""
        self.stubs.Set(volume_api.API,
                       "copy_volume_to_image",
                       raiser)
        req, body = self._upload_image_request()
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._volume_upload_image,
                          req, 1, body)

    def test_copy_volume_to_image_invalidvolume(self):
        def stub_raise(self, context, volume, metadata, force):
            raise exception.InvalidVolume(reason='blah')
        self._assert_upload_raises_bad_request(stub_raise)

    def test_copy_volume_to_image_valueerror(self):
        def stub_raise(self, context, volume, metadata, force):
            raise ValueError
        self._assert_upload_raises_bad_request(stub_raise)

    def test_copy_volume_to_image_remoteerror(self):
        def stub_raise(self, context, volume, metadata, force):
            raise rpc_common.RemoteError
        self._assert_upload_raises_bad_request(stub_raise)
import datetime
import json
import uuid

from lxml import etree
import webob

from cinder import context
from cinder import test
from cinder.tests.api import fakes
from cinder import volume


def fake_volume_get(*args, **kwargs):
    """Return a fixed volume dict hosted on 'host001'."""
    return {
        'id': 'fake',
        'host': 'host001',
        'status': 'available',
        'size': 5,
        'availability_zone': 'somewhere',
        'created_at': datetime.datetime.now(),
        'attach_status': None,
        'display_name': 'anothervolume',
        'display_description': 'Just another volume!',
        'volume_type_id': None,
        'snapshot_id': None,
        'project_id': 'fake',
    }


def fake_volume_get_all(*args, **kwargs):
    """Return a one-element list built from fake_volume_get."""
    return [fake_volume_get()]


def app():
    """Build a v2 API app with no auth middleware.

    Each test injects its own environ['cinder.context'], which passes
    straight through to the API.
    """
    api = fakes.router.APIRouter()
    mapper = fakes.urlmap.URLMap()
    mapper['/v2'] = api
    return mapper


class VolumeHostAttributeTest(test.TestCase):
    """Tests for the admin-only os-vol-host-attr:host attribute."""

    def setUp(self):
        super(VolumeHostAttributeTest, self).setUp()
        self.stubs.Set(volume.API, 'get', fake_volume_get)
        self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
        self.UUID = uuid.uuid4()

    def test_get_volume_allowed(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volume']
        self.assertEqual(vol['os-vol-host-attr:host'], 'host001')

    def test_get_volume_unallowed(self):
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volume']
        self.assertNotIn('os-vol-host-attr:host', vol)

    def test_list_detail_volumes_allowed(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertEqual(vol[0]['os-vol-host-attr:host'], 'host001')

    def test_list_detail_volumes_unallowed(self):
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertNotIn('os-vol-host-attr:host', vol[0])

    def test_list_simple_volumes_no_host(self):
        # The non-detail listing never includes the host, even for admins.
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertNotIn('os-vol-host-attr:host', vol[0])

    def test_get_volume_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = etree.XML(res.body)
        host_key = ('{http://docs.openstack.org/volume/ext/'
                    'volume_host_attribute/api/v1}host')
        self.assertEqual(vol.get(host_key), 'host001')

    def test_list_volumes_detail_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = list(etree.XML(res.body))[0]
        host_key = ('{http://docs.openstack.org/volume/ext/'
                    'volume_host_attribute/api/v1}host')
        self.assertEqual(vol.get(host_key), 'host001')
import datetime
import json
import uuid
from xml.dom import minidom

import webob

from cinder.api import common
from cinder.api.openstack.wsgi import MetadataXMLDeserializer
from cinder.api.openstack.wsgi import XMLDeserializer
from cinder import test
from cinder.tests.api import fakes
from cinder import volume


def fake_volume_get(*args, **kwargs):
    """Return a fixed volume dict for metadata tests."""
    return {
        'id': 'fake',
        'host': 'host001',
        'status': 'available',
        'size': 5,
        'availability_zone': 'somewhere',
        'created_at': datetime.datetime.now(),
        'attach_status': None,
        'display_name': 'anothervolume',
        'display_description': 'Just another volume!',
        'volume_type_id': None,
        'snapshot_id': None,
        'project_id': 'fake',
    }


def fake_volume_get_all(*args, **kwargs):
    """Return a one-element list built from fake_volume_get."""
    return [fake_volume_get()]


# Canned glance metadata echoed back by the stubbed API call below.
fake_image_metadata = {
    'image_id': 'someid',
    'image_name': 'fake',
    'kernel_id': 'somekernel',
    'ramdisk_id': 'someramdisk',
}


def fake_get_volume_image_metadata(*args, **kwargs):
    """Stub for volume.API.get_volume_image_metadata."""
    return fake_image_metadata


class VolumeImageMetadataTest(test.TestCase):
    """JSON tests for the volume_image_metadata extension."""

    content_type = 'application/json'

    def setUp(self):
        super(VolumeImageMetadataTest, self).setUp()
        self.stubs.Set(volume.API, 'get', fake_volume_get)
        self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
        self.stubs.Set(volume.API, 'get_volume_image_metadata',
                       fake_get_volume_image_metadata)
        self.UUID = uuid.uuid4()

    def _make_request(self, url):
        req = webob.Request.blank(url)
        req.accept = self.content_type
        res = req.get_response(fakes.wsgi_app())
        return res

    def _get_image_metadata(self, body):
        return json.loads(body)['volume']['volume_image_metadata']

    def _get_image_metadata_list(self, body):
        return [
            volume['volume_image_metadata']
            for volume in json.loads(body)['volumes']
        ]

    def test_get_volume(self):
        res = self._make_request('/v2/fake/volumes/%s' % self.UUID)
        self.assertEqual(res.status_int, 200)
        self.assertEqual(self._get_image_metadata(res.body),
                         fake_image_metadata)

    def test_list_detail_volumes(self):
        res = self._make_request('/v2/fake/volumes/detail')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(self._get_image_metadata_list(res.body)[0],
                         fake_image_metadata)


class ImageMetadataXMLDeserializer(common.MetadataXMLDeserializer):
    metadata_node_name = "volume_image_metadata"


class VolumeImageMetadataXMLTest(VolumeImageMetadataTest):
    """Re-run the JSON tests against the XML serializer."""

    content_type = 'application/xml'

    def _get_image_metadata(self, body):
        deserializer = XMLDeserializer()
        volume = deserializer.find_first_child_named(
            minidom.parseString(body), 'volume')
        image_metadata = deserializer.find_first_child_named(
            volume, 'volume_image_metadata')
        return MetadataXMLDeserializer().extract_metadata(image_metadata)

    def _get_image_metadata_list(self, body):
        deserializer = XMLDeserializer()
        volumes = deserializer.find_first_child_named(
            minidom.parseString(body), 'volumes')
        volume_list = deserializer.find_children_named(volumes, 'volume')
        # NOTE(review): a real list, not map(); callers index the result
        # with [0], which fails on the iterator map() returns in Python 3.
        return [
            MetadataXMLDeserializer().extract_metadata(
                deserializer.find_first_child_named(
                    volume, 'volume_image_metadata'))
            for volume in volume_list]
import datetime
import json
import uuid

from lxml import etree
import webob

from cinder import context
from cinder import test
from cinder.tests.api import fakes
from cinder import volume


PROJECT_ID = '88fd1da4-f464-4a87-9ce5-26f2f40743b9'


def fake_volume_get(*args, **kwargs):
    """Return a fixed volume dict owned by PROJECT_ID."""
    return {
        'id': 'fake',
        'host': 'host001',
        'status': 'available',
        'size': 5,
        'availability_zone': 'somewhere',
        'created_at': datetime.datetime.now(),
        'attach_status': None,
        'display_name': 'anothervolume',
        'display_description': 'Just another volume!',
        'volume_type_id': None,
        'snapshot_id': None,
        'project_id': PROJECT_ID,
    }


def fake_volume_get_all(*args, **kwargs):
    """Return a one-element list built from fake_volume_get."""
    return [fake_volume_get()]


def app():
    """Build a v2 API app with no auth middleware.

    Each test injects its own environ['cinder.context'], which passes
    straight through to the API.
    """
    api = fakes.router.APIRouter()
    mapper = fakes.urlmap.URLMap()
    mapper['/v2'] = api
    return mapper


class VolumeTenantAttributeTest(test.TestCase):
    """Tests for the admin-only os-vol-tenant-attr:tenant_id attribute."""

    def setUp(self):
        super(VolumeTenantAttributeTest, self).setUp()
        self.stubs.Set(volume.API, 'get', fake_volume_get)
        self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
        self.UUID = uuid.uuid4()

    def test_get_volume_allowed(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volume']
        self.assertEqual(vol['os-vol-tenant-attr:tenant_id'], PROJECT_ID)

    def test_get_volume_unallowed(self):
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volume']
        self.assertNotIn('os-vol-tenant-attr:tenant_id', vol)

    def test_list_detail_volumes_allowed(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertEqual(vol[0]['os-vol-tenant-attr:tenant_id'], PROJECT_ID)

    def test_list_detail_volumes_unallowed(self):
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0])

    def test_list_simple_volumes_no_tenant_id(self):
        # The non-detail listing never includes the tenant id, even for
        # admins.
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0])

    def test_get_volume_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = etree.XML(res.body)
        tenant_key = ('{http://docs.openstack.org/volume/ext/'
                      'volume_tenant_attribute/api/v1}tenant_id')
        self.assertEqual(vol.get(tenant_key), PROJECT_ID)

    def test_list_volumes_detail_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = list(etree.XML(res.body))[0]
        tenant_key = ('{http://docs.openstack.org/volume/ext/'
                      'volume_tenant_attribute/api/v1}tenant_id')
        self.assertEqual(vol.get(tenant_key), PROJECT_ID)
import webob.exc

from cinder.api import extensions
from cinder.api.openstack import wsgi


class FoxInSocksController(object):
    """Demo controller for the sample extension's top-level resource."""

    def index(self, req):
        return "Try to say this Mr. Knox, sir..."


class FoxInSocksServerControllerExtension(wsgi.Controller):
    """Adds three demo actions to the servers collection."""

    @wsgi.action('add_tweedle')
    def _add_tweedle(self, req, id, body):
        return "Tweedle Beetle Added."

    @wsgi.action('delete_tweedle')
    def _delete_tweedle(self, req, id, body):
        return "Tweedle Beetle Deleted."

    @wsgi.action('fail')
    def _fail(self, req, id, body):
        raise webob.exc.HTTPBadRequest(explanation='Tweedle fail')


class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
    """Decorates flavor show responses with a 'googoose' field."""

    @wsgi.extends
    def show(self, req, resp_obj, id):
        #NOTE: This only handles JSON responses.
        # You can use content type header to test for XML.
        resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')


class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
    """Decorates flavor show responses with a top-level 'big_bands' field."""

    @wsgi.extends
    def show(self, req, resp_obj, id):
        #NOTE: This only handles JSON responses.
        # You can use content type header to test for XML.
        resp_obj.obj['big_bands'] = 'Pig Bands!'


class Foxinsocks(extensions.ExtensionDescriptor):
    """The Fox In Socks Extension"""

    name = "Fox In Socks"
    alias = "FOXNSOX"
    namespace = "http://www.fox.in.socks/api/ext/pie/v1.0"
    updated = "2011-01-22T13:25:27-06:00"

    def __init__(self, ext_mgr):
        ext_mgr.register(self)

    def get_resources(self):
        # A single top-level resource routed to the demo controller.
        return [extensions.ResourceExtension('foxnsocks',
                                             FoxInSocksController())]

    def get_controller_extensions(self):
        # (controller class, collection it extends) pairs; note both
        # flavor extensions hang off the same collection.
        extension_set = [
            (FoxInSocksServerControllerExtension, 'servers'),
            (FoxInSocksFlavorGooseControllerExtension, 'flavors'),
            (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
        return [extensions.ControllerExtension(self, collection, klass())
                for klass, collection in extension_set]
import uuid

import routes
import webob
import webob.dec
import webob.request

from cinder.api.middleware import auth
from cinder.api.middleware import fault
from cinder.api.openstack import wsgi as os_wsgi
from cinder.api import urlmap
from cinder.api.v2 import limits
from cinder.api.v2 import router
from cinder.api import versions
from cinder import context
from cinder import exception as exc
from cinder.openstack.common import timeutils
from cinder import wsgi


FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
# Cache used by get_fake_uuid so the same token always maps to the same
# uuid within a test run.
FAKE_UUIDS = {}


class Context(object):
    pass


class FakeRouter(wsgi.Router):
    """Router that answers every request with a canned 200."""

    def __init__(self, ext_mgr=None):
        pass

    @webob.dec.wsgify
    def __call__(self, req):
        res = webob.Response()
        res.status = '200'
        res.headers['X-Test-Success'] = 'True'
        return res


@webob.dec.wsgify
def fake_wsgi(self, req):
    return self.application


def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None,
             use_no_auth=False, ext_mgr=None):
    """Assemble the v2 WSGI stack used by most API tests.

    By default wraps the router with a fake auth context; pass
    use_no_auth=True for the no-auth middleware, or neither flag for the
    real auth middleware.
    """
    if not inner_app_v2:
        inner_app_v2 = router.APIRouter(ext_mgr)

    if fake_auth:
        if fake_auth_context is not None:
            ctxt = fake_auth_context
        else:
            ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt,
                                                       inner_app_v2))
    elif use_no_auth:
        api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware(
            limits.RateLimitingMiddleware(inner_app_v2)))
    else:
        api_v2 = fault.FaultWrapper(auth.AuthMiddleware(
            limits.RateLimitingMiddleware(inner_app_v2)))

    mapper = urlmap.URLMap()
    mapper['/v2'] = api_v2
    mapper['/'] = fault.FaultWrapper(versions.Versions())
    return mapper


def stub_out_rate_limiting(stubs):
    def fake_rate_init(self, app):
        # super(limits.RateLimitingMiddleware, self).__init__(app)
        self.application = app

    # FIXME(ja): unsure about limits in volumes
    # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware,
    #           '__init__', fake_rate_init)

    # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware,
    #           '__call__', fake_wsgi)


def stub_out_key_pair_funcs(stubs, have_key_pair=True):
    # NOTE(review): these inner stubs are defined but never registered
    # with *stubs*, so this helper is currently a no-op — confirm before
    # relying on it.
    def key_pair(context, user_id):
        return [dict(name='key', public_key='public_key')]

    def one_key_pair(context, user_id, name):
        if name == 'key':
            return dict(name='key', public_key='public_key')
        else:
            raise exc.KeypairNotFound(user_id=user_id, name=name)

    def no_key_pair(context, user_id):
        return []


class FakeToken(object):
    """Minimal auth-token record with dict-style attribute access."""

    id_count = 0

    def __getitem__(self, key):
        return getattr(self, key)

    def __init__(self, **kwargs):
        FakeToken.id_count += 1
        self.id = FakeToken.id_count
        # items() works on both Python 2 and 3; iteritems() is Py2-only.
        for k, v in kwargs.items():
            setattr(self, k, v)


class FakeRequestContext(context.RequestContext):
    def __init__(self, *args, **kwargs):
        kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
        super(FakeRequestContext, self).__init__(*args, **kwargs)


class HTTPRequest(webob.Request):

    @classmethod
    def blank(cls, *args, **kwargs):
        # NOTE(review): base_url is pinned to /v1 even though many callers
        # request /v2 paths — confirm this is intentional.
        kwargs['base_url'] = 'http://localhost/v1'
        use_admin_context = kwargs.pop('use_admin_context', False)
        out = webob.Request.blank(*args, **kwargs)
        out.environ['cinder.context'] = FakeRequestContext(
            'fake_user',
            'fake',
            is_admin=use_admin_context)
        return out


class TestRouter(wsgi.Router):
    def __init__(self, controller):
        mapper = routes.Mapper()
        mapper.resource("test", "tests",
                        controller=os_wsgi.Resource(controller))
        super(TestRouter, self).__init__(mapper)


class FakeAuthDatabase(object):
    """In-memory auth-token store keyed by token hash and by 'id_<n>'."""

    data = {}

    @staticmethod
    def auth_token_get(context, token_hash):
        return FakeAuthDatabase.data.get(token_hash, None)

    @staticmethod
    def auth_token_create(context, token):
        fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
        FakeAuthDatabase.data[fake_token.token_hash] = fake_token
        FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
        return fake_token

    @staticmethod
    def auth_token_destroy(context, token_id):
        token = FakeAuthDatabase.data.get('id_%i' % token_id)
        if token and token.token_hash in FakeAuthDatabase.data:
            del FakeAuthDatabase.data[token.token_hash]
            del FakeAuthDatabase.data['id_%i' % token_id]


class FakeRateLimiter(object):
    def __init__(self, application):
        self.application = application

    @webob.dec.wsgify
    def __call__(self, req):
        return self.application


def get_fake_uuid(token=0):
    """Return a uuid string for *token*, stable across repeated calls."""
    if token not in FAKE_UUIDS:
        FAKE_UUIDS[token] = str(uuid.uuid4())
    return FAKE_UUIDS[token]
class TestCinderKeystoneContextMiddleware(test.TestCase):
    """Tests for cinder.api.middleware.auth.CinderKeystoneContext.

    Each test drives a blank request (with tenant and auth-token headers
    pre-set in setUp) through the middleware and inspects the
    'cinder.context' the fake downstream app captured.
    """

    def setUp(self):
        super(TestCinderKeystoneContextMiddleware, self).setUp()

        @webob.dec.wsgify()
        def fake_app(req):
            # Capture the context the middleware installed.
            self.context = req.environ['cinder.context']
            return webob.Response()

        self.context = None
        self.middleware = (cinder.api.middleware.auth
                           .CinderKeystoneContext(fake_app))
        self.request = webob.Request.blank('/')
        self.request.headers['X_TENANT_ID'] = 'testtenantid'
        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'

    def test_no_user_or_user_id(self):
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '401 Unauthorized')

    def test_user_id_only(self):
        # FIX: this test exercises the X_USER_ID header; it was
        # previously misnamed test_user_only.
        self.request.headers['X_USER_ID'] = 'testuserid'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuserid')

    def test_user_only(self):
        # FIX: this test exercises the deprecated X_USER header; it was
        # previously misnamed test_user_id_only.
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuser')

    def test_user_id_trumps_user(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuserid')


class TestFaults(test.TestCase):
    """Tests covering `cinder.api.openstack.faults:Fault` class."""

    def _prepare_xml(self, xml_string):
        """Remove characters from string which hinder XML equality testing."""
        xml_string = xml_string.replace(" ", "")
        xml_string = xml_string.replace("\n", "")
        xml_string = xml_string.replace("\t", "")
        return xml_string

    def test_400_fault_json(self):
        """Test fault serialized to JSON via file-extension and/or header."""
        requests = [
            webob.Request.blank('/.json'),
            webob.Request.blank('/', headers={"Accept": "application/json"}),
        ]

        for request in requests:
            fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
            response = request.get_response(fault)

            expected = {
                "badRequest": {
                    "message": "scram",
                    "code": 400,
                },
            }
            actual = jsonutils.loads(response.body)

            self.assertEqual(response.content_type, "application/json")
            self.assertEqual(expected, actual)

    def test_413_fault_json(self):
        """Test fault serialized to JSON via file-extension and/or header."""
        requests = [
            webob.Request.blank('/.json'),
            webob.Request.blank('/', headers={"Accept": "application/json"}),
        ]

        for request in requests:
            exc = webob.exc.HTTPRequestEntityTooLarge
            fault = wsgi.Fault(exc(explanation='sorry',
                                   headers={'Retry-After': 4}))
            response = request.get_response(fault)

            expected = {
                "overLimit": {
                    "message": "sorry",
                    "code": 413,
                    "retryAfter": 4,
                },
            }
            actual = jsonutils.loads(response.body)

            self.assertEqual(response.content_type, "application/json")
            self.assertEqual(expected, actual)

    def test_raise(self):
        """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
        @webob.dec.wsgify
        def raiser(req):
            raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))

        req = webob.Request.blank('/.xml')
        resp = req.get_response(raiser)
        self.assertEqual(resp.content_type, "application/xml")
        self.assertEqual(resp.status_int, 404)
        # FIX: assertIn over assertTrue('x' in y) for clearer failures.
        self.assertIn('whut?', resp.body)

    def test_raise_403(self):
        """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
        @webob.dec.wsgify
        def raiser(req):
            raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))

        req = webob.Request.blank('/.xml')
        resp = req.get_response(raiser)
        self.assertEqual(resp.content_type, "application/xml")
        self.assertEqual(resp.status_int, 403)
        # FIX: assertNotIn/assertIn over assertTrue membership checks.
        self.assertNotIn('resizeNotAllowed', resp.body)
        self.assertIn('forbidden', resp.body)

    def test_fault_has_status_int(self):
        """Ensure the status_int is set correctly on faults"""
        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
        self.assertEqual(fault.status_int, 400)

    def test_xml_serializer(self):
        """Ensure that a v1.1 request responds with a v1 xmlns"""
        request = webob.Request.blank('/v1',
                                      headers={"Accept": "application/xml"})

        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
        response = request.get_response(fault)

        self.assertIn(common.XML_NS_V1, response.body)
        self.assertEqual(response.content_type, "application/xml")
        self.assertEqual(response.status_int, 400)


class FaultsXMLSerializationTestV11(test.TestCase):
    """Tests covering `cinder.api.openstack.faults:Fault` class."""

    # NOTE(review): the expected-XML fixtures below were reconstructed
    # from the serializer metadata; the original markup was lost in
    # transit -- verify against upstream before merging.

    def _prepare_xml(self, xml_string):
        xml_string = xml_string.replace(" ", "")
        xml_string = xml_string.replace("\n", "")
        xml_string = xml_string.replace("\t", "")
        return xml_string

    def test_400_fault(self):
        metadata = {'attributes': {"badRequest": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V1)

        fixture = {
            "badRequest": {
                "message": "scram",
                "code": 400,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <badRequest code="400" xmlns="%s">
                    <message>scram</message>
                </badRequest>
            """) % common.XML_NS_V1)

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_413_fault(self):
        metadata = {'attributes': {"overLimit": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V1)

        fixture = {
            "overLimit": {
                "message": "sorry",
                "code": 413,
                "retryAfter": 4,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <overLimit code="413" xmlns="%s">
                    <message>sorry</message>
                    <retryAfter>4</retryAfter>
                </overLimit>
            """) % common.XML_NS_V1)

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_404_fault(self):
        metadata = {'attributes': {"itemNotFound": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V1)

        fixture = {
            "itemNotFound": {
                "message": "sorry",
                "code": 404,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <itemNotFound code="404" xmlns="%s">
                    <message>sorry</message>
                </itemNotFound>
            """) % common.XML_NS_V1)

        self.assertEqual(expected.toxml(), actual.toxml())
class TestLimitingReader(test.TestCase):
    """Tests for sizelimit.LimitingReader byte accounting."""

    def test_limiting_reader(self):
        """Reading exactly the limit succeeds, via iteration and read()."""
        BYTES = 1024
        bytes_read = 0
        data = StringIO.StringIO("*" * BYTES)
        for chunk in sizelimit.LimitingReader(data, BYTES):
            bytes_read += len(chunk)

        # FIX: assertEqual over the deprecated assertEquals alias,
        # consistent with the rest of the test suite.
        self.assertEqual(bytes_read, BYTES)

        bytes_read = 0
        data = StringIO.StringIO("*" * BYTES)
        reader = sizelimit.LimitingReader(data, BYTES)
        byte = reader.read(1)
        while len(byte) != 0:
            bytes_read += 1
            byte = reader.read(1)

        self.assertEqual(bytes_read, BYTES)

    def test_limiting_reader_fails(self):
        """Reading past the limit raises HTTPRequestEntityTooLarge."""
        BYTES = 1024

        def _consume_all_iter():
            bytes_read = 0
            data = StringIO.StringIO("*" * BYTES)
            for chunk in sizelimit.LimitingReader(data, BYTES - 1):
                bytes_read += len(chunk)

        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          _consume_all_iter)

        def _consume_all_read():
            bytes_read = 0
            data = StringIO.StringIO("*" * BYTES)
            reader = sizelimit.LimitingReader(data, BYTES - 1)
            byte = reader.read(1)
            while len(byte) != 0:
                bytes_read += 1
                byte = reader.read(1)

        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          _consume_all_read)


class TestRequestBodySizeLimiter(test.TestCase):
    """Tests for the RequestBodySizeLimiter WSGI middleware."""

    def setUp(self):
        super(TestRequestBodySizeLimiter, self).setUp()

        @webob.dec.wsgify()
        def fake_app(req):
            # Echo the body back so a 200 proves the body got through.
            return webob.Response(req.body)

        self.middleware = sizelimit.RequestBodySizeLimiter(fake_app)
        self.request = webob.Request.blank('/', method='POST')

    def test_content_length_acceptable(self):
        self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE
        self.request.body = "0" * MAX_REQUEST_BODY_SIZE
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status_int, 200)

    def test_content_length_too_large(self):
        self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1
        self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1)
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status_int, 413)

    def test_request_too_large_no_content_length(self):
        # Oversized body must still be rejected when the header is unset.
        self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1)
        self.request.headers['Content-Length'] = None
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status_int, 413)
+ +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/openstack/test_wsgi.py b/cinder/tests/api/openstack/test_wsgi.py new file mode 100644 index 0000000000..77308602d0 --- /dev/null +++ b/cinder/tests/api/openstack/test_wsgi.py @@ -0,0 +1,858 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +import inspect +import webob + +from cinder.api.openstack import wsgi +from cinder import exception +from cinder import test +from cinder.tests.api import fakes + + +class RequestTest(test.TestCase): + def test_content_type_missing(self): + request = wsgi.Request.blank('/tests/123', method='POST') + request.body = "" + self.assertEqual(None, request.get_content_type()) + + def test_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123', method='POST') + request.headers["Content-Type"] = "text/html" + request.body = "asdf
" + self.assertRaises(exception.InvalidContentType, + request.get_content_type) + + def test_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_from_accept(self): + for content_type in ('application/xml', + 'application/vnd.openstack.volume+xml', + 'application/json', + 'application/vnd.openstack.volume+json'): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = content_type + result = request.best_match_content_type() + self.assertEqual(result, content_type) + + def test_content_type_from_accept_best(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_m