From ab766108a556f9411648f76344245f391732dbc4 Mon Sep 17 00:00:00 2001
From: Tony Breeds
Date: Tue, 12 Sep 2017 16:21:38 -0600
Subject: [PATCH] Retire Packaging Deb project repos

This commit is part of a series to retire the Packaging Deb project.
Step 2 is to remove all content from the project repos, replacing it
with a README notification of where to find ongoing work, and how to
recover the repo if needed at some future point (as in
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: I548868a77d769b1466ab02aa43a9f290bf592643
---
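[Editorial note, not part of the original patch. Text placed after the
"---" separator of a format-patch mail is ignored by "git am", so this
note does not change what the patch applies. Per the retirement process
linked above, the removed content remains in git history; a minimal
sketch of recovering it, assuming the retirement commit hash from this
mail's header:

    # Revert the retirement commit to restore the removed tree:
    git revert ab766108a556f9411648f76344245f391732dbc4

    # Or inspect the tree as it was just before retirement,
    # without creating a new commit:
    git checkout ab766108a556f9411648f76344245f391732dbc4^ -- .

Both commands operate on local history only, so they keep working even
after the project is retired upstream.]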
 .coveragerc | 8 -
 .gitignore | 29 -
 .gitreview | 4 -
 .testr.conf | 7 -
 AUTHORS.rst | 14 -
 CONTRIBUTING.rst | 19 -
 HACKING.rst | 136 --
 LICENSE | 176 --
 README | 14 +
 README.rst | 73 -
 api-ref/source/claims.inc | 218 --
 api-ref/source/conf.py | 229 ---
 api-ref/source/flavors.inc | 207 --
 api-ref/source/health.inc | 69 -
 api-ref/source/index.rst | 27 -
 api-ref/source/messages.inc | 334 ---
 api-ref/source/parameters.yaml | 498 -----
 api-ref/source/pools.inc | 212 --
 api-ref/source/queues.inc | 366 ----
 .../samples/claim_messages_request.json | 4 -
 .../samples/claim_messages_response.json | 12 -
 .../source/samples/claim_query_response.json | 15 -
 .../source/samples/claim_update_request.json | 4 -
 .../source/samples/flavor-create-request.json | 3 -
 .../source/samples/flavor-list-response.json | 22 -
 .../source/samples/flavor-show-response.json | 12 -
 .../source/samples/flavor-update-request.json | 3 -
 .../samples/flavor-update-response.json | 12 -
 api-ref/source/samples/health-response.json | 48 -
 .../samples/messages-get-byids-response.json | 15 -
 .../source/samples/messages-get-response.json | 11 -
 .../samples/messages-list-response.json | 32 -
 .../source/samples/messages-post-request.json | 18 -
 .../samples/messages-post-response.json | 6 -
 .../source/samples/pool-create-request.json | 8 -
 .../source/samples/pool-list-response.json | 24 -
 .../source/samples/pool-show-response.json | 7 -
 .../source/samples/pool-update-request.json | 8 -
 .../source/samples/pool-update-response.json | 7 -
 .../source/samples/purge-queue-request.json | 3 -
 .../source/samples/queue-create-request.json | 5 -
 .../samples/queue-pre-signed-request.json | 5 -
 .../samples/queue-pre-signed-response.json | 16 -
 .../source/samples/queue-show-response.json | 5 -
 .../source/samples/queue-stats-response.json | 7 -
 .../source/samples/queue-update-request.json | 7 -
 .../source/samples/queue-update-response.json | 3 -
 .../source/samples/queues-list-response.json | 22 -
 .../subscription-create-request-http.json | 5 -
 .../subscription-create-request-mail.json | 8 -
 .../samples/subscription-create-response.json | 3 -
 .../samples/subscription-show-response.json | 10 -
 .../samples/subscription-update-request.json | 7 -
 .../samples/subscriptions-list-response.json | 26 -
 .../samples/versions-list-response.json | 55 -
 api-ref/source/subscription.inc | 229 ---
 api-ref/source/versions.inc | 40 -
 babel.cfg | 2 -
 bench-requirements.txt | 4 -
 devstack/README.rst | 15 -
 devstack/gate/gate_hook.sh | 73 -
 devstack/gate/post_test_hook.sh | 39 -
 devstack/plugin.sh | 320 ---
 devstack/settings | 48 -
 devstack/upgrade/resource.sh | 69 -
 devstack/upgrade/settings | 19 -
 devstack/upgrade/shutdown.sh | 26 -
 devstack/upgrade/upgrade.sh | 108 -
 doc/README.md | 2 -
 doc/source/admin/CORS.rst | 120 --
 doc/source/admin/OSprofiler.rst | 124 --
 doc/source/admin/gmr.rst | 87 -
 doc/source/admin/index.rst | 13 -
 doc/source/admin/running_benchmark.rst | 184 --
 doc/source/admin/subscription_confirm.rst | 298 ---
 doc/source/admin/writing_pipeline_stages.rst | 225 --
 doc/source/conf.py | 234 ---
 .../contributor/development.environment.rst | 298 ---
 doc/source/contributor/first_patch.rst | 320 ---
 doc/source/contributor/first_review.rst | 115 --
 doc/source/contributor/gerrit.rst | 28 -
 .../contributor/images/zaqar_review_id.png | Bin 77939 -> 0 bytes
 doc/source/contributor/index.rst | 42 -
 doc/source/contributor/jenkins.rst | 32 -
 doc/source/contributor/launchpad.rst | 56 -
 doc/source/contributor/reviewer_guide.rst | 165 --
 doc/source/contributor/running_tests.rst | 167 --
 doc/source/contributor/storage.rst | 32 -
 doc/source/contributor/test_suite.rst | 96 -
 doc/source/contributor/transport.rst | 9 -
 doc/source/contributor/welcome.rst | 187 --
 doc/source/glossary.rst | 77 -
 doc/source/index.rst | 108 -
 doc/source/install/get_started.rst | 65 -
 doc/source/install/index.rst | 41 -
 doc/source/install/install-obs.rst | 545 -----
 doc/source/install/install-rdo.rst | 545 -----
 doc/source/install/install-ubuntu.rst | 529 -----
 doc/source/install/install.rst | 35 -
 doc/source/install/next-steps.rst | 9 -
 doc/source/install/verify.rst | 36 -
 doc/source/user/authentication_tokens.rst | 37 -
 doc/source/user/getting_started.rst | 387 ----
 doc/source/user/headers_queue_api_working.rst | 356 ----
 doc/source/user/index.rst | 11 -
 doc/source/user/send_request_api.rst | 89 -
 dox.yml | 12 -
 etc/logging.conf.sample | 49 -
 etc/oslo-config-generator/zaqar.conf | 22 -
 etc/policy.json.sample | 48 -
 etc/uwsgi.conf | 7 -
 etc/zaqar-benchmark-messages.json | 72 -
 etc/zaqar-benchmark.conf.sample | 5 -
 rally-jobs/README.rst | 29 -
 rally-jobs/extra/README.rst | 7 -
 rally-jobs/plugins/README.rst | 12 -
 rally-jobs/plugins/__init__.py | 0
 rally-jobs/zaqar-zaqar.yaml | 12 -
 releasenotes/notes/.gitignore | 0
 ...Sprofiler-with-zaqar-59d0dc3d0326947d.yaml | 6 -
 ...notifier-using-trust-271d9cd1d2b4cdeb.yaml | 10 -
 .../add-swift-backend-4eb9b43913f39d18.yaml | 6 -
 ...bsocket-notification-fa542fbf761378d3.yaml | 7 -
 .../notes/deprecate-v11-976cccc1b56a28e7.yaml | 6 -
 ...ut-reserved-metadata-b53857ed9821fe76.yaml | 5 -
 ..._issue_for_root_path-b15e1c4e92e4e8b1.yaml | 7 -
 ...x_subscription_limit-c3cdc9385825285a.yaml | 5 -
 ...roduce-guru-to-zaqar-ac7b51c764503829.yaml | 6 -
 ...ues-in-subscriptions-6bade4a1b8eca3e5.yaml | 8 -
 .../notes/purge-queue-6788a249ee59d55a.yaml | 4 -
 ...attributes_for_queue-3d87333752484c87.yaml | 8 -
 .../notes/sql_init-c9b3883241631f24.yaml | 7 -
 ...sqlalchemy-migration-6b4eaebb6e02a449.yaml | 3 -
 ...mation-support-email-0c2a56cfedc5d1e2.yaml | 15 -
 .../notes/support-cors-af8349382a44aa0d.yaml | 3 -
 ...rt-dot-in-queue-name-bd2b3d523f55451f.yaml | 3 -
 ...-deprecated-versions-44656aeb8ebb8881.yaml | 7 -
 ...er_queue_for_mongodb-c8b7303319e7f920.yaml | 8 -
 .../user_ipv6_sockets-1e1b436de6b81ae3.yaml | 7 -
 ...ription_confirmation-883cb7f325885ef0.yaml | 9 -
 releasenotes/source/_static/.gitignore | 0
 releasenotes/source/_templates/.gitignore | 0
 releasenotes/source/conf.py | 278 ---
 releasenotes/source/index.rst | 12 -
 releasenotes/source/liberty.rst | 6 -
 releasenotes/source/mitaka.rst | 6 -
 releasenotes/source/newton.rst | 6 -
 releasenotes/source/ocata.rst | 6 -
 releasenotes/source/unreleased.rst | 5 -
 requirements.txt | 34 -
 .../html/confirmation_web_service_sample.py | 86 -
 samples/html/subscriptionConfirmation.html | 148 --
 samples/html/unsubscriptionConfirmation.html | 145 --
 .../receive_message/JsonDecoder.java | 43 -
 .../receive_message/SampleZaqarEndpoint.java | 57 -
 .../send_message/SampleZaqarEndpoint.java | 45 -
 .../receive_message/zaqar_sample.js | 32 -
 .../javascript/send_message/zaqar_sample.js | 25 -
 samples/javascript/websocket.html | 309 ---
 .../receive_message/SampleZaqarServlet.java | 55 -
 .../send_message/SampleZaqarServlet.java | 52 -
 .../nodejs/receive_message/zaqar_sample.js | 34 -
 samples/nodejs/send_message/zaqar_sample.js | 27 -
 .../receive_message/zaqar_sample.py | 30 -
 .../send_message/zaqar_sample.py | 27 -
 samples/zaqar/subscriber_service_sample.py | 79 -
 setup.cfg | 123 --
 setup.py | 29 -
 test-requirements.txt | 40 -
 tools/doc/find_autodoc_modules.sh | 20 -
 tools/doc/generate_autodoc_index.sh | 46 -
 tools/test-setup.sh | 33 -
 tox.ini | 79 -
 zaqar/__init__.py | 22 -
 zaqar/api/__init__.py | 0
 zaqar/api/handler.py | 131 --
 zaqar/api/v1/__init__.py | 0
 zaqar/api/v1/request.py | 387 ----
 zaqar/api/v1/response.py | 301 ---
 zaqar/api/v1_1/__init__.py | 0
 zaqar/api/v1_1/request.py | 244 ---
 zaqar/api/v1_1/response.py | 411 ----
 zaqar/api/v2/__init__.py | 0
 zaqar/api/v2/endpoints.py | 969 ---------
 zaqar/api/v2/request.py | 126 --
 zaqar/api/v2/response.py | 411 ----
 zaqar/bench/__init__.py | 0
 zaqar/bench/conductor.py | 105 -
 zaqar/bench/config.py | 79 -
 zaqar/bench/consumer.py | 195 --
 zaqar/bench/helpers.py | 135 --
 zaqar/bench/observer.py | 180 --
 zaqar/bench/producer.py | 191 --
 zaqar/bootstrap.py | 129 --
 zaqar/cmd/__init__.py | 0
 zaqar/cmd/gc.py | 35 -
 zaqar/cmd/server.py | 87 -
 zaqar/common/__init__.py | 18 -
 zaqar/common/access.py | 21 -
 zaqar/common/api/__init__.py | 0
 zaqar/common/api/api.py | 78 -
 zaqar/common/api/errors.py | 41 -
 zaqar/common/api/request.py | 48 -
 zaqar/common/api/response.py | 42 -
 zaqar/common/api/schemas/__init__.py | 0
 zaqar/common/api/schemas/flavors.py | 63 -
 zaqar/common/api/schemas/pools.py | 73 -
 zaqar/common/api/utils.py | 219 --
 zaqar/common/auth.py | 95 -
 zaqar/common/cache.py | 24 -
 zaqar/common/cli.py | 59 -
 zaqar/common/configs.py | 126 --
 zaqar/common/consts.py | 117 --
 zaqar/common/decorators.py | 218 --
 zaqar/common/errors.py | 30 -
 zaqar/common/pipeline.py | 115 --
 zaqar/common/storage/__init__.py | 0
 zaqar/common/storage/select.py | 58 -
 zaqar/common/transport/__init__.py | 0
 zaqar/common/transport/wsgi/__init__.py | 0
 zaqar/common/transport/wsgi/helpers.py | 262 ---
 zaqar/common/urls.py | 112 -
 zaqar/common/utils.py | 69 -
 zaqar/context.py | 55 -
 zaqar/hacking/__init__.py | 0
 zaqar/hacking/checks.py | 51 -
 zaqar/i18n.py | 27 -
 zaqar/locale/es/LC_MESSAGES/zaqar.po | 416 ----
 .../locale/fr/LC_MESSAGES/zaqar-log-error.po | 43 -
 zaqar/notification/__init__.py | 0
 zaqar/notification/notifier.py | 150 --
 zaqar/notification/tasks/__init__.py | 0
 zaqar/notification/tasks/mailto.py | 110 -
 zaqar/notification/tasks/trust.py | 63 -
 zaqar/notification/tasks/webhook.py | 47 -
 zaqar/storage/__init__.py | 36 -
 zaqar/storage/base.py | 1086 ----------
 zaqar/storage/configuration.py | 48 -
 zaqar/storage/errors.py | 212 --
 zaqar/storage/mongodb/__init__.py | 64 -
 zaqar/storage/mongodb/catalogue.py | 107 -
 zaqar/storage/mongodb/claims.py | 327 ---
 zaqar/storage/mongodb/controllers.py | 41 -
 zaqar/storage/mongodb/driver.py | 304 ---
 zaqar/storage/mongodb/flavors.py | 155 --
 zaqar/storage/mongodb/messages.py | 1038 ----------
 zaqar/storage/mongodb/options.py | 147 --
 zaqar/storage/mongodb/pools.py | 169 --
 zaqar/storage/mongodb/queues.py | 290 ---
 zaqar/storage/mongodb/subscriptions.py | 196 --
 zaqar/storage/mongodb/utils.py | 322 ---
 zaqar/storage/pipeline.py | 177 --
 zaqar/storage/pooling.py | 687 -------
 zaqar/storage/redis/__init__.py | 53 -
 zaqar/storage/redis/claims.py | 411 ----
 zaqar/storage/redis/controllers.py | 24 -
 zaqar/storage/redis/driver.py | 292 ---
 zaqar/storage/redis/messages.py | 614 ------
 zaqar/storage/redis/models.py | 320 ---
 zaqar/storage/redis/options.py | 71 -
 zaqar/storage/redis/queues.py | 190 --
 zaqar/storage/redis/scripting.py | 40 -
 .../storage/redis/scripts/claim_messages.lua | 101 -
 .../storage/redis/scripts/index_messages.lua | 39 -
 zaqar/storage/redis/subscriptions.py | 269 ---
 zaqar/storage/redis/utils.py | 277 ---
 zaqar/storage/sqlalchemy/__init__.py | 19 -
 zaqar/storage/sqlalchemy/catalogue.py | 111 -
 zaqar/storage/sqlalchemy/controllers.py | 25 -
 zaqar/storage/sqlalchemy/driver.py | 110 -
 zaqar/storage/sqlalchemy/flavors.py | 149 --
 .../storage/sqlalchemy/migration/__init__.py | 0
 .../storage/sqlalchemy/migration/alembic.ini | 54 -
 .../migration/alembic_migrations/README.md | 73 -
 .../migration/alembic_migrations/env.py | 96 -
 .../alembic_migrations/script.py.mako | 34 -
 .../versions/001_liberty.py | 72 -
 .../versions/002_placeholder.py | 30 -
 .../versions/003_placeholder.py | 30 -
 .../versions/004_placeholder.py | 30 -
 .../versions/005_placeholder.py | 30 -
 zaqar/storage/sqlalchemy/migration/cli.py | 118 --
 zaqar/storage/sqlalchemy/options.py | 34 -
 zaqar/storage/sqlalchemy/pools.py | 166 --
 zaqar/storage/sqlalchemy/queues.py | 135 --
 zaqar/storage/sqlalchemy/tables.py | 61 -
 zaqar/storage/sqlalchemy/utils.py | 137 --
 zaqar/storage/swift/__init__.py | 0
 zaqar/storage/swift/claims.py | 194 --
 zaqar/storage/swift/controllers.py | 21 -
 zaqar/storage/swift/driver.py | 123 --
 zaqar/storage/swift/messages.py | 374 ----
 zaqar/storage/swift/options.py | 36 -
 zaqar/storage/swift/subscriptions.py | 160 --
 zaqar/storage/swift/utils.py | 144 --
 zaqar/storage/utils.py | 212 --
 zaqar/tests/__init__.py | 30 -
 zaqar/tests/base.py | 116 --
 zaqar/tests/etc/drivers_storage_invalid.conf | 12 -
 .../tests/etc/drivers_transport_invalid.conf | 11 -
 zaqar/tests/etc/functional-tests.conf | 12 -
 zaqar/tests/etc/functional-zaqar.conf | 58 -
 zaqar/tests/etc/keystone_auth.conf | 14 -
 zaqar/tests/etc/policy.json | 48 -
 zaqar/tests/etc/websocket_mongodb.conf | 21 -
 .../etc/websocket_mongodb_keystone_auth.conf | 21 -
 .../etc/websocket_mongodb_subscriptions.conf | 24 -
 zaqar/tests/etc/wsgi_faulty.conf | 12 -
 zaqar/tests/etc/wsgi_fifo_mongodb.conf | 22 -
 zaqar/tests/etc/wsgi_mongodb.conf | 25 -
 .../etc/wsgi_mongodb_default_limits.conf | 6 -
 zaqar/tests/etc/wsgi_mongodb_pooled.conf | 20 -
 ...i_mongodb_pooled_disable_virtual_pool.conf | 20 -
 zaqar/tests/etc/wsgi_mongodb_validation.conf | 13 -
 zaqar/tests/etc/wsgi_redis.conf | 22 -
 zaqar/tests/etc/wsgi_redis_pooled.conf | 20 -
 zaqar/tests/etc/wsgi_sqlalchemy.conf | 15 -
 zaqar/tests/etc/wsgi_sqlalchemy_pooled.conf | 16 -
 zaqar/tests/etc/wsgi_swift.conf | 12 -
 zaqar/tests/faulty_storage.py | 146 --
 zaqar/tests/functional/__init__.py | 0
 zaqar/tests/functional/base.py | 409 ----
 zaqar/tests/functional/config.py | 50 -
 zaqar/tests/functional/helpers.py | 115 --
 zaqar/tests/functional/http.py | 215 --
 zaqar/tests/functional/websocket/__init__.py | 0
 .../tests/functional/websocket/test_queues.py | 72 -
 zaqar/tests/functional/wsgi/__init__.py | 0
 zaqar/tests/functional/wsgi/test_versions.py | 36 -
 zaqar/tests/functional/wsgi/v1/__init__.py | 0
 zaqar/tests/functional/wsgi/v1/test_claims.py | 259 ---
 .../tests/functional/wsgi/v1/test_messages.py | 380 ----
 zaqar/tests/functional/wsgi/v1/test_queues.py | 440 ----
 zaqar/tests/functional/wsgi/v1_1/__init__.py | 0
 .../tests/functional/wsgi/v1_1/test_claims.py | 276 ---
 .../tests/functional/wsgi/v1_1/test_health.py | 84 -
 .../functional/wsgi/v1_1/test_messages.py | 528 -----
 .../tests/functional/wsgi/v1_1/test_pools.py | 234 ---
 .../tests/functional/wsgi/v1_1/test_queues.py | 355 ----
 zaqar/tests/functional/wsgi/v2/__init__.py | 0
 .../functional/wsgi/v2/test_subscriptions.py | 123 --
 zaqar/tests/helpers.py | 301 ---
 zaqar/tests/tempest_plugin/__init__.py | 0
 .../tempest_plugin/api_schema/__init__.py | 0
 .../api_schema/response/__init__.py | 0
 .../api_schema/response/v1/__init__.py | 0
 .../api_schema/response/v1/queues.py | 238 ---
 .../api_schema/response/v1_1/__init__.py | 0
 .../api_schema/response/v1_1/queues.py | 250 ---
 .../api_schema/response/v2/__init__.py | 0
 .../api_schema/response/v2/queues.py | 297 ---
 zaqar/tests/tempest_plugin/config.py | 56 -
 zaqar/tests/tempest_plugin/plugin.py | 43 -
 .../tests/tempest_plugin/services/__init__.py | 0
 .../services/messaging/__init__.py | 0
 .../services/messaging/json/__init__.py | 0
 .../messaging/json/messaging_client.py | 506 -----
 zaqar/tests/tempest_plugin/tests/__init__.py | 0
 zaqar/tests/tempest_plugin/tests/base.py | 277 ---
 .../tests/tempest_plugin/tests/v1/__init__.py | 0
 .../tempest_plugin/tests/v1/test_claims.py | 114 -
 .../tempest_plugin/tests/v1/test_messages.py | 118 --
 .../tempest_plugin/tests/v1/test_queues.py | 118 --
 .../tempest_plugin/tests/v1_1/__init__.py | 0
 .../tempest_plugin/tests/v1_1/test_claims.py | 114 -
 .../tests/v1_1/test_messages.py | 121 --
 .../tempest_plugin/tests/v1_1/test_queues.py | 105 -
 .../tests/tempest_plugin/tests/v2/__init__.py | 0
 .../tempest_plugin/tests/v2/test_claims.py | 114 -
 .../tests/v2/test_claims_negative.py | 416 ----
 .../tempest_plugin/tests/v2/test_messages.py | 121 --
 .../tests/v2/test_messages_negative.py | 646 ------
 .../tempest_plugin/tests/v2/test_queues.py | 149 --
 .../tests/v2/test_queues_negative.py | 239 ---
 .../tests/v2/test_subscriptions.py | 153 --
 .../tests/v2/test_subscriptions_negative.py | 372 ----
 zaqar/tests/unit/__init__.py | 0
 zaqar/tests/unit/common/__init__.py | 0
 zaqar/tests/unit/common/storage/__init__.py | 0
 .../tests/unit/common/storage/test_select.py | 66 -
 zaqar/tests/unit/common/storage/test_utils.py | 62 -
 zaqar/tests/unit/common/test_api.py | 57 -
 zaqar/tests/unit/common/test_decorators.py | 185 --
 zaqar/tests/unit/common/test_pipeline.py | 95 -
 zaqar/tests/unit/common/test_request.py | 32 -
 zaqar/tests/unit/common/test_urls.py | 107 -
 zaqar/tests/unit/hacking/__init__.py | 0
 zaqar/tests/unit/hacking/test_hacking.py | 27 -
 zaqar/tests/unit/notification/__init__.py | 0
 .../tests/unit/notification/test_notifier.py | 413 ----
 zaqar/tests/unit/storage/__init__.py | 0
 zaqar/tests/unit/storage/base.py | 1828 -----------------
 .../storage/sqlalchemy_migration/__init__.py | 0
 .../test_db_manage_cli.py | 89 -
 .../sqlalchemy_migration/test_migrations.py | 173 --
 .../test_migrations_base.py | 190 --
 zaqar/tests/unit/storage/test_impl_mongodb.py | 589 ------
 zaqar/tests/unit/storage/test_impl_redis.py | 475 -----
 .../unit/storage/test_impl_sqlalchemy.py | 74 -
 zaqar/tests/unit/storage/test_impl_swift.py | 57 -
 zaqar/tests/unit/storage/test_pool_catalog.py | 129 --
 zaqar/tests/unit/storage/test_utils.py | 29 -
 zaqar/tests/unit/test_bootstrap.py | 53 -
 zaqar/tests/unit/transport/__init__.py | 0
 zaqar/tests/unit/transport/test_acl.py | 57 -
 .../unit/transport/websocket/__init__.py | 0
 zaqar/tests/unit/transport/websocket/base.py | 108 -
 .../unit/transport/websocket/test_protocol.py | 94 -
 zaqar/tests/unit/transport/websocket/utils.py | 48 -
 .../unit/transport/websocket/v2/__init__.py | 0
 .../unit/transport/websocket/v2/test_auth.py | 258 ---
 .../transport/websocket/v2/test_claims.py | 439 ----
 .../transport/websocket/v2/test_messages.py | 610 ------
 .../websocket/v2/test_queue_lifecycle.py | 676 ------
 .../websocket/v2/test_subscriptions.py | 390 ----
 zaqar/tests/unit/transport/wsgi/__init__.py | 23 -
 zaqar/tests/unit/transport/wsgi/base.py | 188 --
 zaqar/tests/unit/transport/wsgi/test_utils.py | 189 --
 .../tests/unit/transport/wsgi/test_version.py | 86 -
 .../tests/unit/transport/wsgi/v1/__init__.py | 0
 .../tests/unit/transport/wsgi/v1/test_auth.py | 43 -
 .../unit/transport/wsgi/v1/test_claims.py | 257 ---
 .../transport/wsgi/v1/test_default_limits.py | 99 -
 .../unit/transport/wsgi/v1/test_health.py | 33 -
 .../tests/unit/transport/wsgi/v1/test_home.py | 57 -
 .../unit/transport/wsgi/v1/test_media_type.py | 82 -
 .../unit/transport/wsgi/v1/test_messages.py | 509 -----
 .../unit/transport/wsgi/v1/test_pools.py | 335 ---
 .../transport/wsgi/v1/test_queue_lifecycle.py | 401 ----
 .../unit/transport/wsgi/v1/test_validation.py | 127 --
 .../unit/transport/wsgi/v1_1/__init__.py | 0
 .../unit/transport/wsgi/v1_1/test_auth.py | 43 -
 .../unit/transport/wsgi/v1_1/test_claims.py | 315 ---
 .../wsgi/v1_1/test_default_limits.py | 124 --
 .../unit/transport/wsgi/v1_1/test_flavors.py | 341 ---
 .../unit/transport/wsgi/v1_1/test_health.py | 89 -
 .../unit/transport/wsgi/v1_1/test_home.py | 71 -
 .../transport/wsgi/v1_1/test_media_type.py | 81 -
 .../unit/transport/wsgi/v1_1/test_messages.py | 643 ------
 .../unit/transport/wsgi/v1_1/test_ping.py | 38 -
 .../unit/transport/wsgi/v1_1/test_pools.py | 354 ----
 .../wsgi/v1_1/test_queue_lifecycle.py | 391 ----
 .../transport/wsgi/v1_1/test_validation.py | 138 --
 .../unit/transport/wsgi/v2_0/__init__.py | 0
 .../unit/transport/wsgi/v2_0/test_auth.py | 43 -
 .../unit/transport/wsgi/v2_0/test_claims.py | 316 ---
 .../wsgi/v2_0/test_default_limits.py | 124 --
 .../unit/transport/wsgi/v2_0/test_flavors.py | 347 ----
 .../unit/transport/wsgi/v2_0/test_health.py | 89 -
 .../unit/transport/wsgi/v2_0/test_home.py | 71 -
 .../transport/wsgi/v2_0/test_media_type.py | 81 -
 .../unit/transport/wsgi/v2_0/test_messages.py | 685 ------
 .../unit/transport/wsgi/v2_0/test_ping.py | 38 -
 .../unit/transport/wsgi/v2_0/test_pools.py | 372 ----
 .../unit/transport/wsgi/v2_0/test_purge.py | 119 --
 .../wsgi/v2_0/test_queue_lifecycle.py | 505 -----
 .../transport/wsgi/v2_0/test_subscriptions.py | 444 ----
 .../unit/transport/wsgi/v2_0/test_urls.py | 232 ---
 .../transport/wsgi/v2_0/test_validation.py | 203 --
 zaqar/transport/__init__.py | 19 -
 zaqar/transport/acl.py | 44 -
 zaqar/transport/base.py | 102 -
 zaqar/transport/middleware/__init__.py | 0
 zaqar/transport/middleware/auth.py | 72 -
 zaqar/transport/middleware/cors.py | 105 -
 zaqar/transport/middleware/profile.py | 116 --
 zaqar/transport/utils.py | 60 -
 zaqar/transport/validation.py | 638 ------
 zaqar/transport/websocket/__init__.py | 21 -
 zaqar/transport/websocket/driver.py | 136 --
 zaqar/transport/websocket/factory.py | 74 -
 zaqar/transport/websocket/protocol.py | 272 ---
 zaqar/transport/wsgi/__init__.py | 19 -
 zaqar/transport/wsgi/app.py | 50 -
 zaqar/transport/wsgi/driver.py | 193 --
 zaqar/transport/wsgi/errors.py | 93 -
 zaqar/transport/wsgi/utils.py | 242 ---
 zaqar/transport/wsgi/v1_0/__init__.py | 110 -
 zaqar/transport/wsgi/v1_0/claims.py | 173 --
 zaqar/transport/wsgi/v1_0/health.py | 30 -
 zaqar/transport/wsgi/v1_0/homedoc.py | 142 --
 zaqar/transport/wsgi/v1_0/messages.py | 298 ---
 zaqar/transport/wsgi/v1_0/metadata.py | 96 -
 zaqar/transport/wsgi/v1_0/pools.py | 235 ---
 zaqar/transport/wsgi/v1_0/queues.py | 133 --
 zaqar/transport/wsgi/v1_0/stats.py | 73 -
 zaqar/transport/wsgi/v1_1/__init__.py | 129 --
 zaqar/transport/wsgi/v1_1/claims.py | 200 --
 zaqar/transport/wsgi/v1_1/flavors.py | 217 --
 zaqar/transport/wsgi/v1_1/health.py | 39 -
 zaqar/transport/wsgi/v1_1/homedoc.py | 292 ---
 zaqar/transport/wsgi/v1_1/messages.py | 366 ----
 zaqar/transport/wsgi/v1_1/ping.py | 30 -
 zaqar/transport/wsgi/v1_1/pools.py | 253 ---
 zaqar/transport/wsgi/v1_1/queues.py | 162 --
 zaqar/transport/wsgi/v1_1/stats.py | 74 -
 zaqar/transport/wsgi/v2_0/__init__.py | 151 --
 zaqar/transport/wsgi/v2_0/claims.py | 205 --
 zaqar/transport/wsgi/v2_0/flavors.py | 258 ---
 zaqar/transport/wsgi/v2_0/health.py | 43 -
 zaqar/transport/wsgi/v2_0/homedoc.py | 392 ----
 zaqar/transport/wsgi/v2_0/messages.py | 397 ----
 zaqar/transport/wsgi/v2_0/ping.py | 37 -
 zaqar/transport/wsgi/v2_0/pools.py | 277 ---
 zaqar/transport/wsgi/v2_0/purge.py | 83 -
 zaqar/transport/wsgi/v2_0/queues.py | 302 ---
 zaqar/transport/wsgi/v2_0/stats.py | 78 -
 zaqar/transport/wsgi/v2_0/subscriptions.py | 304 ---
 zaqar/transport/wsgi/v2_0/urls.py | 78 -
 zaqar/transport/wsgi/version.py | 39 -
 zaqar/version.py | 53 -
 zaqar_upgradetests/post_test_hook.sh | 0
 zaqar_upgradetests/pre_test_hook.sh | 0
 513 files changed, 14 insertions(+), 63286 deletions(-)
 delete mode 100644 .coveragerc
 delete mode 100644 .gitignore
 delete mode 100644 .gitreview
 delete mode 100644 .testr.conf
 delete mode 100644 AUTHORS.rst
 delete mode 100644 CONTRIBUTING.rst
 delete mode 100644 HACKING.rst
 delete mode 100644 LICENSE
 create mode 100644 README
 delete mode 100644 README.rst
 delete mode 100644 api-ref/source/claims.inc
 delete mode 100644 api-ref/source/conf.py
 delete mode 100644 api-ref/source/flavors.inc
 delete mode 100644 api-ref/source/health.inc
 delete mode 100644 api-ref/source/index.rst
 delete mode 100644 api-ref/source/messages.inc
 delete mode 100644 api-ref/source/parameters.yaml
 delete mode 100644 api-ref/source/pools.inc
 delete mode 100644 api-ref/source/queues.inc
 delete mode 100644 api-ref/source/samples/claim_messages_request.json
 delete mode 100644 api-ref/source/samples/claim_messages_response.json
 delete mode 100644 api-ref/source/samples/claim_query_response.json
 delete mode 100644 api-ref/source/samples/claim_update_request.json
 delete mode 100644 api-ref/source/samples/flavor-create-request.json
 delete mode 100644 api-ref/source/samples/flavor-list-response.json
 delete mode 100644 api-ref/source/samples/flavor-show-response.json
 delete mode 100644 api-ref/source/samples/flavor-update-request.json
 delete mode 100644 api-ref/source/samples/flavor-update-response.json
 delete mode 100644 api-ref/source/samples/health-response.json
 delete mode 100644 api-ref/source/samples/messages-get-byids-response.json
 delete mode 100644 api-ref/source/samples/messages-get-response.json
 delete mode 100644 api-ref/source/samples/messages-list-response.json
 delete mode 100644 api-ref/source/samples/messages-post-request.json
 delete mode 100644 api-ref/source/samples/messages-post-response.json
 delete mode 100644 api-ref/source/samples/pool-create-request.json
 delete mode 100644 api-ref/source/samples/pool-list-response.json
 delete mode 100644 api-ref/source/samples/pool-show-response.json
 delete mode 100644 api-ref/source/samples/pool-update-request.json
 delete mode 100644 api-ref/source/samples/pool-update-response.json
 delete mode 100644 api-ref/source/samples/purge-queue-request.json
 delete mode 100644 api-ref/source/samples/queue-create-request.json
 delete mode 100644 api-ref/source/samples/queue-pre-signed-request.json
 delete mode 100644 api-ref/source/samples/queue-pre-signed-response.json
 delete mode 100644 api-ref/source/samples/queue-show-response.json
 delete mode 100644 api-ref/source/samples/queue-stats-response.json
 delete mode 100644 api-ref/source/samples/queue-update-request.json
 delete mode 100644 api-ref/source/samples/queue-update-response.json
 delete mode 100644 api-ref/source/samples/queues-list-response.json
 delete mode 100644 api-ref/source/samples/subscription-create-request-http.json
 delete mode 100644 api-ref/source/samples/subscription-create-request-mail.json
 delete mode 100644 api-ref/source/samples/subscription-create-response.json
 delete mode 100644 api-ref/source/samples/subscription-show-response.json
 delete mode 100644 api-ref/source/samples/subscription-update-request.json
 delete mode 100644 api-ref/source/samples/subscriptions-list-response.json
 delete mode 100644 api-ref/source/samples/versions-list-response.json
 delete mode 100644 api-ref/source/subscription.inc
 delete mode 100644 api-ref/source/versions.inc
 delete mode 100644 babel.cfg
 delete mode 100644 bench-requirements.txt
 delete mode 100644 devstack/README.rst
 delete mode 100755 devstack/gate/gate_hook.sh
 delete mode 100755 devstack/gate/post_test_hook.sh
 delete mode 100755 devstack/plugin.sh
 delete mode 100644 devstack/settings
 delete mode 100755 devstack/upgrade/resource.sh
 delete mode 100644 devstack/upgrade/settings
 delete mode 100755 devstack/upgrade/shutdown.sh
 delete mode 100755 devstack/upgrade/upgrade.sh
 delete mode 100644 doc/README.md
 delete mode 100644 doc/source/admin/CORS.rst
 delete mode 100644 doc/source/admin/OSprofiler.rst
 delete mode 100644 doc/source/admin/gmr.rst
 delete mode 100644 doc/source/admin/index.rst
 delete mode 100644 doc/source/admin/running_benchmark.rst
 delete mode 100644 doc/source/admin/subscription_confirm.rst
 delete mode 100644 doc/source/admin/writing_pipeline_stages.rst
 delete mode 100644 doc/source/conf.py
 delete mode 100644 doc/source/contributor/development.environment.rst
 delete mode 100644 doc/source/contributor/first_patch.rst
 delete mode 100644 doc/source/contributor/first_review.rst
 delete mode 100644 doc/source/contributor/gerrit.rst
 delete mode 100644 doc/source/contributor/images/zaqar_review_id.png
 delete mode 100644 doc/source/contributor/index.rst
 delete mode 100644 doc/source/contributor/jenkins.rst
 delete mode 100644 doc/source/contributor/launchpad.rst
 delete mode 100644 doc/source/contributor/reviewer_guide.rst
 delete mode 100644 doc/source/contributor/running_tests.rst
 delete mode 100644 doc/source/contributor/storage.rst
 delete mode 100644 doc/source/contributor/test_suite.rst
 delete mode 100644 doc/source/contributor/transport.rst
 delete mode 100644 doc/source/contributor/welcome.rst
 delete mode 100644 doc/source/glossary.rst
 delete mode 100644 doc/source/index.rst
 delete mode 100644 doc/source/install/get_started.rst
 delete mode 100644 doc/source/install/index.rst
 delete mode 100644 doc/source/install/install-obs.rst
 delete mode 100644 doc/source/install/install-rdo.rst
 delete mode 100644 doc/source/install/install-ubuntu.rst
 delete mode 100644 doc/source/install/install.rst
 delete mode 100644 doc/source/install/next-steps.rst
 delete mode 100644 doc/source/install/verify.rst
 delete mode 100644 doc/source/user/authentication_tokens.rst
 delete mode 100644 doc/source/user/getting_started.rst
 delete mode 100644 doc/source/user/headers_queue_api_working.rst
 delete mode 100644 doc/source/user/index.rst
 delete mode 100644 doc/source/user/send_request_api.rst
 delete mode 100644 dox.yml
 delete mode 100644 etc/logging.conf.sample
 delete mode 100644 etc/oslo-config-generator/zaqar.conf
 delete mode 100644 etc/policy.json.sample
 delete mode 100644 etc/uwsgi.conf
 delete mode 100644 etc/zaqar-benchmark-messages.json
 delete mode 100644 etc/zaqar-benchmark.conf.sample
 delete mode 100644 rally-jobs/README.rst
 delete mode 100644 rally-jobs/extra/README.rst
 delete mode 100644 rally-jobs/plugins/README.rst
 delete mode 100644 rally-jobs/plugins/__init__.py
 delete mode 100644 rally-jobs/zaqar-zaqar.yaml
 delete mode 100644 releasenotes/notes/.gitignore
 delete mode 100644 releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml
 delete mode 100644 releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml
 delete mode 100644 releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml
 delete mode 100644 releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml
 delete mode 100644 releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml
 delete mode 100644 releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml
 delete mode 100644 releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml
 delete mode 100644 releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml
 delete mode 100644 releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml
 delete mode 100644 releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml
 delete mode 100644 releasenotes/notes/purge-queue-6788a249ee59d55a.yaml
 delete mode 100644 releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml
 delete mode 100644 releasenotes/notes/sql_init-c9b3883241631f24.yaml
 delete mode 100644 releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml
 delete mode 100644 releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml
 delete mode 100644 releasenotes/notes/support-cors-af8349382a44aa0d.yaml
 delete mode 100644 releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml
 delete mode 100644 releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml
 delete mode 100644 releasenotes/notes/support_dead_letter_queue_for_mongodb-c8b7303319e7f920.yaml
 delete mode 100644 releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml
 delete mode 100644 releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml
 delete mode 100644 releasenotes/source/_static/.gitignore
 delete mode 100644 releasenotes/source/_templates/.gitignore
 delete mode 100644 releasenotes/source/conf.py
 delete mode 100644 releasenotes/source/index.rst
 delete mode 100644 releasenotes/source/liberty.rst
 delete mode 100644 releasenotes/source/mitaka.rst
 delete mode 100644 releasenotes/source/newton.rst
 delete mode 100644 releasenotes/source/ocata.rst
 delete mode 100644 releasenotes/source/unreleased.rst
 delete mode 100644 requirements.txt
 delete mode 100644 samples/html/confirmation_web_service_sample.py
 delete mode 100644 samples/html/subscriptionConfirmation.html
 delete mode 100644 samples/html/unsubscriptionConfirmation.html
 delete mode 100755 samples/java-api-for-websocket/receive_message/JsonDecoder.java
 delete mode 100755 samples/java-api-for-websocket/receive_message/SampleZaqarEndpoint.java
 delete mode 100755 samples/java-api-for-websocket/send_message/SampleZaqarEndpoint.java
 delete mode 100755 samples/javascript/receive_message/zaqar_sample.js
 delete mode 100755 samples/javascript/send_message/zaqar_sample.js
 delete mode 100644 samples/javascript/websocket.html
 delete mode 100755 samples/jaxrs/receive_message/SampleZaqarServlet.java
 delete mode 100755 samples/jaxrs/send_message/SampleZaqarServlet.java
 delete mode 100755 samples/nodejs/receive_message/zaqar_sample.js
 delete mode 100755 samples/nodejs/send_message/zaqar_sample.js
 delete mode 100755 samples/python-zaqarclient/receive_message/zaqar_sample.py
 delete mode 100755 samples/python-zaqarclient/send_message/zaqar_sample.py
 delete mode 100644 samples/zaqar/subscriber_service_sample.py
 delete mode 100644 setup.cfg
 delete mode 100644 setup.py
 delete mode 100644 test-requirements.txt
 delete mode 100755 tools/doc/find_autodoc_modules.sh
 delete mode 100755 tools/doc/generate_autodoc_index.sh
 delete mode 100755 tools/test-setup.sh
 delete mode 100644 tox.ini
 delete mode 100644 zaqar/__init__.py
 delete mode 100644 zaqar/api/__init__.py
 delete mode 100644 zaqar/api/handler.py
 delete mode 100644 zaqar/api/v1/__init__.py
 delete mode 100644 zaqar/api/v1/request.py
 delete mode 100644 zaqar/api/v1/response.py
 delete mode 100644 zaqar/api/v1_1/__init__.py
 delete mode 100644 zaqar/api/v1_1/request.py
 delete mode 100644 zaqar/api/v1_1/response.py
 delete mode 100644 zaqar/api/v2/__init__.py
 delete mode 100644 zaqar/api/v2/endpoints.py
 delete mode 100644 zaqar/api/v2/request.py
 delete mode 100644 zaqar/api/v2/response.py
 delete mode 100644 zaqar/bench/__init__.py
 delete mode 100644 zaqar/bench/conductor.py
 delete mode 100644 zaqar/bench/config.py
 delete mode 100644 zaqar/bench/consumer.py
 delete mode 100644 zaqar/bench/helpers.py
 delete mode 100644 zaqar/bench/observer.py
 delete mode 100644 zaqar/bench/producer.py
 delete mode 100644 zaqar/bootstrap.py
 delete mode 100644 zaqar/cmd/__init__.py
 delete mode 100644 zaqar/cmd/gc.py
 delete mode 100644 zaqar/cmd/server.py
 delete mode 100644 zaqar/common/__init__.py
 delete mode 100644 zaqar/common/access.py
 delete mode 100644 zaqar/common/api/__init__.py
 delete mode 100644 zaqar/common/api/api.py
 delete mode 100644 zaqar/common/api/errors.py
 delete mode 100644 zaqar/common/api/request.py
 delete mode 100644 zaqar/common/api/response.py
 delete mode 100644 zaqar/common/api/schemas/__init__.py
 delete mode 100644 zaqar/common/api/schemas/flavors.py
 delete mode 100644 zaqar/common/api/schemas/pools.py
 delete mode 100644 zaqar/common/api/utils.py
 delete mode 100644 zaqar/common/auth.py
 delete mode 100644 zaqar/common/cache.py
 delete mode 100644 zaqar/common/cli.py
 delete mode 100644 zaqar/common/configs.py
 delete mode 100644 zaqar/common/consts.py
 delete mode 100644 zaqar/common/decorators.py
 delete mode 100644 zaqar/common/errors.py
 delete mode 100644 zaqar/common/pipeline.py
 delete mode 100644 zaqar/common/storage/__init__.py
 delete mode 100644 zaqar/common/storage/select.py
 delete mode 100644 zaqar/common/transport/__init__.py
 delete mode 100644 zaqar/common/transport/wsgi/__init__.py
 delete mode 100644 zaqar/common/transport/wsgi/helpers.py
 delete mode 100644 zaqar/common/urls.py
 delete mode 100644 zaqar/common/utils.py
 delete mode 100644 zaqar/context.py
 delete mode 100644 zaqar/hacking/__init__.py
 delete mode 100644 zaqar/hacking/checks.py
 delete mode 100644 zaqar/i18n.py
 delete mode 100644 zaqar/locale/es/LC_MESSAGES/zaqar.po
 delete mode 100644 zaqar/locale/fr/LC_MESSAGES/zaqar-log-error.po
 delete mode 100644 zaqar/notification/__init__.py
 delete mode 100644 zaqar/notification/notifier.py
 delete mode 100644 zaqar/notification/tasks/__init__.py
 delete mode 100644 zaqar/notification/tasks/mailto.py
 delete mode 100644 zaqar/notification/tasks/trust.py
 delete mode 100644 zaqar/notification/tasks/webhook.py
 delete mode 100644 zaqar/storage/__init__.py
 delete mode 100644 zaqar/storage/base.py
 delete mode 100644 zaqar/storage/configuration.py
 delete mode 100644 zaqar/storage/errors.py
 delete mode 100644 zaqar/storage/mongodb/__init__.py
 delete mode 100644 zaqar/storage/mongodb/catalogue.py
 delete mode 100644 zaqar/storage/mongodb/claims.py
 delete mode 100644 zaqar/storage/mongodb/controllers.py
 delete mode 100644 zaqar/storage/mongodb/driver.py
 delete mode 100644 zaqar/storage/mongodb/flavors.py
 delete mode 100644 zaqar/storage/mongodb/messages.py
 delete mode 100644 zaqar/storage/mongodb/options.py
 delete mode 100644 zaqar/storage/mongodb/pools.py
 delete mode 100644 zaqar/storage/mongodb/queues.py
 delete mode 100644 zaqar/storage/mongodb/subscriptions.py
 delete mode 100644 zaqar/storage/mongodb/utils.py
 delete mode 100644 zaqar/storage/pipeline.py
 delete mode 100644 zaqar/storage/pooling.py
 delete mode 100644 zaqar/storage/redis/__init__.py
 delete mode 100644 zaqar/storage/redis/claims.py
 delete mode 100644 zaqar/storage/redis/controllers.py
 delete mode 100644 zaqar/storage/redis/driver.py
 delete mode 100644 zaqar/storage/redis/messages.py
 delete mode 100644 zaqar/storage/redis/models.py
 delete mode 100644 zaqar/storage/redis/options.py
 delete mode 100644 zaqar/storage/redis/queues.py
 delete mode 100644 zaqar/storage/redis/scripting.py
 delete mode 100644 zaqar/storage/redis/scripts/claim_messages.lua
 delete mode 100644 zaqar/storage/redis/scripts/index_messages.lua
 delete mode 100644 zaqar/storage/redis/subscriptions.py
 delete mode 100644 zaqar/storage/redis/utils.py
 delete mode 100644 zaqar/storage/sqlalchemy/__init__.py
 delete mode 100644 zaqar/storage/sqlalchemy/catalogue.py
 delete mode 100644 zaqar/storage/sqlalchemy/controllers.py
 delete mode 100644 zaqar/storage/sqlalchemy/driver.py
 delete mode 100644 zaqar/storage/sqlalchemy/flavors.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/__init__.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic.ini
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/README.md
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/env.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/script.py.mako
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/001_liberty.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/002_placeholder.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/003_placeholder.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/004_placeholder.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/005_placeholder.py
 delete mode 100644 zaqar/storage/sqlalchemy/migration/cli.py
 delete mode 100644 zaqar/storage/sqlalchemy/options.py
 delete mode 100644 zaqar/storage/sqlalchemy/pools.py
 delete mode 100644 zaqar/storage/sqlalchemy/queues.py
 delete mode 100644 zaqar/storage/sqlalchemy/tables.py
 delete mode 100644 zaqar/storage/sqlalchemy/utils.py
 delete mode 100644 zaqar/storage/swift/__init__.py
 delete mode 100644 zaqar/storage/swift/claims.py
 delete mode 100644 zaqar/storage/swift/controllers.py
 delete mode 100644 zaqar/storage/swift/driver.py
 delete mode 100644 zaqar/storage/swift/messages.py
 delete mode 100644 zaqar/storage/swift/options.py
 delete mode 100644 zaqar/storage/swift/subscriptions.py
 delete mode 100644 zaqar/storage/swift/utils.py
 delete mode 100644 zaqar/storage/utils.py
 delete mode 100644 zaqar/tests/__init__.py
 delete mode 100644 zaqar/tests/base.py
 delete mode 100644 zaqar/tests/etc/drivers_storage_invalid.conf
 delete mode 100644 zaqar/tests/etc/drivers_transport_invalid.conf
 delete mode 100644 zaqar/tests/etc/functional-tests.conf
 delete mode 100644 zaqar/tests/etc/functional-zaqar.conf
 delete mode 100644 zaqar/tests/etc/keystone_auth.conf
 delete mode 100644 zaqar/tests/etc/policy.json
 delete mode 100644 zaqar/tests/etc/websocket_mongodb.conf
 delete mode 100644 zaqar/tests/etc/websocket_mongodb_keystone_auth.conf
 delete mode 100644 zaqar/tests/etc/websocket_mongodb_subscriptions.conf
 delete mode 100644 zaqar/tests/etc/wsgi_faulty.conf
 delete mode 100644 zaqar/tests/etc/wsgi_fifo_mongodb.conf
 delete mode 100644 zaqar/tests/etc/wsgi_mongodb.conf
 delete mode 100644 zaqar/tests/etc/wsgi_mongodb_default_limits.conf
 delete mode 100644 zaqar/tests/etc/wsgi_mongodb_pooled.conf
 delete mode 100644 zaqar/tests/etc/wsgi_mongodb_pooled_disable_virtual_pool.conf
 delete mode 100644 zaqar/tests/etc/wsgi_mongodb_validation.conf
 delete mode 100644 zaqar/tests/etc/wsgi_redis.conf
 delete mode 100644 zaqar/tests/etc/wsgi_redis_pooled.conf
 delete mode 100644 zaqar/tests/etc/wsgi_sqlalchemy.conf
 delete mode 100644 zaqar/tests/etc/wsgi_sqlalchemy_pooled.conf
 delete mode 100644 zaqar/tests/etc/wsgi_swift.conf
 delete mode 100644 zaqar/tests/faulty_storage.py
 delete mode 100644 zaqar/tests/functional/__init__.py
 delete mode 100644 zaqar/tests/functional/base.py
 delete mode 100644 zaqar/tests/functional/config.py
 delete mode 100644 zaqar/tests/functional/helpers.py
 delete mode 100644 zaqar/tests/functional/http.py
 delete mode 100644 zaqar/tests/functional/websocket/__init__.py
 delete mode 100644 zaqar/tests/functional/websocket/test_queues.py
 delete mode 100644 zaqar/tests/functional/wsgi/__init__.py
 delete mode 100644 zaqar/tests/functional/wsgi/test_versions.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1/__init__.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1/test_claims.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1/test_messages.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1/test_queues.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1_1/__init__.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1_1/test_claims.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1_1/test_health.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1_1/test_messages.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1_1/test_pools.py
 delete mode 100644 zaqar/tests/functional/wsgi/v1_1/test_queues.py
 delete mode 100644 zaqar/tests/functional/wsgi/v2/__init__.py
 delete mode 100644 zaqar/tests/functional/wsgi/v2/test_subscriptions.py
 delete mode 100644 zaqar/tests/helpers.py
 delete mode 100644 zaqar/tests/tempest_plugin/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/response/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/response/v1/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/response/v1/queues.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/response/v1_1/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/response/v1_1/queues.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/response/v2/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/api_schema/response/v2/queues.py
 delete mode 100644 zaqar/tests/tempest_plugin/config.py
 delete mode 100644 zaqar/tests/tempest_plugin/plugin.py
 delete mode 100644 zaqar/tests/tempest_plugin/services/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/services/messaging/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/services/messaging/json/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/services/messaging/json/messaging_client.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/base.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1/test_claims.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1/test_messages.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1/test_queues.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1_1/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1_1/test_claims.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1_1/test_messages.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v1_1/test_queues.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/__init__.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_claims.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_claims_negative.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_messages.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_messages_negative.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_queues.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_queues_negative.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_subscriptions.py
 delete mode 100644 zaqar/tests/tempest_plugin/tests/v2/test_subscriptions_negative.py
 delete mode 100644 zaqar/tests/unit/__init__.py
 delete mode 100644 zaqar/tests/unit/common/__init__.py
 delete mode 100644 zaqar/tests/unit/common/storage/__init__.py
 delete mode 100644 zaqar/tests/unit/common/storage/test_select.py
 delete mode 100644 zaqar/tests/unit/common/storage/test_utils.py
 delete mode 100644 zaqar/tests/unit/common/test_api.py
 delete mode 100644 zaqar/tests/unit/common/test_decorators.py
 delete mode 100644 zaqar/tests/unit/common/test_pipeline.py
 delete mode 100644 zaqar/tests/unit/common/test_request.py
 delete mode 100644 zaqar/tests/unit/common/test_urls.py
 delete mode 100644 zaqar/tests/unit/hacking/__init__.py
 delete mode 100644 zaqar/tests/unit/hacking/test_hacking.py
 delete mode 100644 zaqar/tests/unit/notification/__init__.py
 delete mode 100644 zaqar/tests/unit/notification/test_notifier.py
 delete mode 100644 zaqar/tests/unit/storage/__init__.py
 delete mode 100644 zaqar/tests/unit/storage/base.py
 delete mode 100644 zaqar/tests/unit/storage/sqlalchemy_migration/__init__.py
 delete mode 100644 zaqar/tests/unit/storage/sqlalchemy_migration/test_db_manage_cli.py
 delete mode 100644 zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations.py
 delete mode 100644 zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations_base.py
 delete mode 100644 zaqar/tests/unit/storage/test_impl_mongodb.py
 delete mode 100644 zaqar/tests/unit/storage/test_impl_redis.py
 delete mode 100644 zaqar/tests/unit/storage/test_impl_sqlalchemy.py
 delete mode 100644 zaqar/tests/unit/storage/test_impl_swift.py
 delete mode 100644 zaqar/tests/unit/storage/test_pool_catalog.py
 delete mode 100644 zaqar/tests/unit/storage/test_utils.py
 delete mode 100644 zaqar/tests/unit/test_bootstrap.py
 delete mode 100644 zaqar/tests/unit/transport/__init__.py
 delete mode 100644 zaqar/tests/unit/transport/test_acl.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/__init__.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/base.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/test_protocol.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/utils.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/v2/__init__.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/v2/test_auth.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/v2/test_claims.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/v2/test_messages.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/v2/test_queue_lifecycle.py
 delete mode 100644 zaqar/tests/unit/transport/websocket/v2/test_subscriptions.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/__init__.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/base.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/test_utils.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/test_version.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/__init__.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_auth.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_claims.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_default_limits.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_health.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_home.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_media_type.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_messages.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_pools.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_queue_lifecycle.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1/test_validation.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/__init__.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_auth.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_claims.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_default_limits.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_flavors.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_health.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_home.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_media_type.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_messages.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_ping.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_pools.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_queue_lifecycle.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v1_1/test_validation.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/__init__.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_auth.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_claims.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_default_limits.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_flavors.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_health.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_home.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_media_type.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_messages.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_ping.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_pools.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_purge.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_subscriptions.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_urls.py
 delete mode 100644 zaqar/tests/unit/transport/wsgi/v2_0/test_validation.py
 delete mode 100644 zaqar/transport/__init__.py
 delete mode 100644 zaqar/transport/acl.py
 delete mode 100644 zaqar/transport/base.py
 delete mode 100644 zaqar/transport/middleware/__init__.py
 delete mode 100644 zaqar/transport/middleware/auth.py
 delete mode 100644 zaqar/transport/middleware/cors.py
 delete mode 100644 zaqar/transport/middleware/profile.py
 delete mode 100644 zaqar/transport/utils.py
 delete mode 100644 zaqar/transport/validation.py
 delete mode 100644 zaqar/transport/websocket/__init__.py
 delete mode 100644 zaqar/transport/websocket/driver.py
 delete mode 100644 zaqar/transport/websocket/factory.py
 delete mode 100644 zaqar/transport/websocket/protocol.py
 delete mode 100644 zaqar/transport/wsgi/__init__.py
 delete mode 100644 zaqar/transport/wsgi/app.py
 delete mode 100644 zaqar/transport/wsgi/driver.py
 delete mode 100644 zaqar/transport/wsgi/errors.py
 delete mode 100644 zaqar/transport/wsgi/utils.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/__init__.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/claims.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/health.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/homedoc.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/messages.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/metadata.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/pools.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/queues.py
 delete mode 100644 zaqar/transport/wsgi/v1_0/stats.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/__init__.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/claims.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/flavors.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/health.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/homedoc.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/messages.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/ping.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/pools.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/queues.py
 delete mode 100644 zaqar/transport/wsgi/v1_1/stats.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/__init__.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/claims.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/flavors.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/health.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/homedoc.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/messages.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/ping.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/pools.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/purge.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/queues.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/stats.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/subscriptions.py
 delete mode 100644 zaqar/transport/wsgi/v2_0/urls.py
 delete mode 100644 zaqar/transport/wsgi/version.py
 delete mode 100644 zaqar/version.py
 delete mode 100755 zaqar_upgradetests/post_test_hook.sh
 delete mode 100755 zaqar_upgradetests/pre_test_hook.sh
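[Editorial note, not part of the original patch: git treats everything
between the summary above and the first "diff --git" header as
commentary, so this note does not affect applying the mail. A quick,
hedged sketch of sanity-checking a large removal series like this one
before applying it (the filename is hypothetical):

    # Reprint the diffstat from the mail and compare it to the summary:
    git apply --stat 0001-Retire-Packaging-Deb-project-repos.patch

    # Verify the patch would apply cleanly, without touching the tree:
    git apply --check 0001-Retire-Packaging-Deb-project-repos.patch

    # Apply it with author, date, and commit message preserved:
    git am 0001-Retire-Packaging-Deb-project-repos.patch

"git apply --check" exits non-zero on any conflict, which is the usual
guard before running "git am" on a 500-plus-file deletion.]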
diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index 86770a47..00000000
--- a/.coveragerc
+++ /dev/null
@@ -1,8 +0,0 @@
-[run]
-branch = True
-omit = zaqar/tests/*
-
-[report]
-exclude_lines =
-    if _ZAQAR_SETUP__:
-    raise NotImplementedError
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 194b3d40..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-*.bak
-*.DS_Store
-target/
-*.pyc
-*.dat
-TAGS
-*.egg-info
-*.egg
-build
-.coverage
-.tox
-cover
-venv
-.venv
-output.xml
-*.sublime-workspace
-*.sqlite
-*.html
-.*.swp
-.DS_Store
-.testrepository
-versioninfo
-var/*
-ChangeLog
-AUTHORS
-etc/zaqar.conf.sample
-.idea
-# Files created by releasenotes build
-releasenotes/build
diff --git a/.gitreview b/.gitreview
deleted file mode 100644
index 6de59929..00000000
--- a/.gitreview
+++ /dev/null
@@ -1,4 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/zaqar.git
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index 17b0e3b3..00000000
--- a/.testr.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
-    OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
-    OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
-    ${PYTHON:-python} $JIT_FLAG -m subunit.run discover -t ${OS_TOP_LEVEL:-./} ${OS_TEST_PATH:-./zaqar/tests/unit} $LISTOPT $IDOPTION
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/AUTHORS.rst b/AUTHORS.rst
deleted file mode 100644
index 723f850e..00000000
--- a/AUTHORS.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-Maintainer
-----------
-OpenStack Foundation
-IRC: #openstack on irc.freenode.net
-
-Original Authors
-----------------
-Bryan Davidson (bryan.davidson@rackspace.com)
-Kurt Griffiths (mail@kgriffs.com)
-Jamie Painter (jamie.painter@rackspace.com)
-Flavio Premoli (flaper87@flaper87.org)
-Zhihao Yuan (lichray@gmail.com)
-
-See also AUTHORS for a complete list of contributors.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index 066a28f4..00000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-If you would like to contribute to the development of OpenStack,
-you must follow the steps in this page:
-
-   https://docs.openstack.org/infra/manual/developers.html
-
-Once those steps have been completed, changes to OpenStack
-should be submitted for review via the Gerrit tool, following
-the workflow documented at:
-
-   https://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Pull requests submitted through GitHub will be ignored.
-
-Bugs should be filed on Launchpad, not GitHub:
-
-   https://bugs.launchpad.net/zaqar
-
-
-
diff --git a/HACKING.rst b/HACKING.rst
deleted file mode 100644
index 6d3e3e55..00000000
--- a/HACKING.rst
+++ /dev/null
@@ -1,136 +0,0 @@
-========================
-Zaqar style commandments
-========================
-
-- Step 1: Read the OpenStack Style Commandments
-  https://docs.openstack.org/developer/hacking/
-- Step 2: Read on for Zaqar specific commandments
-
-General
--------
-- Optimize for readability; whitespace is your friend.
-- Use blank lines to group related logic.
-- All classes must inherit from ``object`` (explicitly).
-- Use single-quotes for strings unless the string contains a
-  single-quote.
-- Use the double-quote character for blockquotes (``"""``, not ``'''``)
-- USE_ALL_CAPS_FOR_GLOBAL_CONSTANTS
-
-Comments
---------
-- In general use comments as "memory pegs" for those coming after you up
-  the trail.
-- Guide the reader though long functions with a comments introducing
-  different sections of the code.
-- Choose clean, descriptive names for functions and variables to make
-  them self-documenting.
-- Add ``# NOTE(termie): blah blah...`` comments to clarify your intent, or
-  to explain a tricky algorithm, when it isn't obvious from just reading
-  the code.
-
-
-Identifiers
------------
-- Don't use single characters in identifiers except in trivial loop variables and mathematical algorithms.
-- Avoid abbreviations, especially if they are ambiguous or their meaning would not be immediately clear to the casual reader or newcomer.
-
-Wrapping
---------
-Wrap long lines by using Python's implied line continuation inside
-parentheses, brackets and braces. Make sure to indent the continued
-line appropriately. The preferred place to break around a binary
-operator is after the operator, not before it.
-
-Example::
-
-  class Rectangle(Blob):
-
-      def __init__(self, width, height,
-                   color='black', emphasis=None, highlight=0):
-
-          # More indentation included to distinguish this from the rest.
-          if (width == 0 and height == 0 and
-                  color == 'red' and emphasis == 'strong' or
-                  highlight > 100):
-              raise ValueError('sorry, you lose')
-
-          if width == 0 and height == 0 and (color == 'red' or
-                                             emphasis is None):
-              raise ValueError("I don't think so -- values are {0}, {1}".format(
-                               width, height))
-
-          msg = ('this is a very long string that goes on and on and on and'
-                 'on and on and on...')
-
-          super(Rectangle, self).__init__(width, height,
-                                          color, emphasis, highlight)
-
-
-Imports
--------
-- Classes and functions may be hoisted into a package namespace, via __init__ files, with some discretion.
-
-More Import Examples
---------------------
-
-**INCORRECT** ::
-
-  import zaqar.transport.wsgi as wsgi
-
-**CORRECT** ::
-
-  from zaqar.transport import wsgi
-
-Docstrings
-----------
-
-Docstrings are required for all functions and methods.
-
-Docstrings should ONLY use triple-double-quotes (``"""``)
-
-Single-line docstrings should NEVER have extraneous whitespace
-between enclosing triple-double-quotes.
-
-**INCORRECT** ::
-
-  """ There is some whitespace between the enclosing quotes :( """
-
-**CORRECT** ::
-
-  """There is no whitespace between the enclosing quotes :)"""
-
-Docstrings should document default values for named arguments
-if they're not None
-
-Docstrings that span more than one line should look like this:
-
-Example::
-
-  """Single-line summary, right after the opening triple-double-quote.
-
-  If you are going to describe parameters and return values, use Sphinx; the
-  appropriate syntax is as follows.
-
-  :param foo: the foo parameter
-  :param bar: (Default True) the bar parameter
-  :param foo_long_bar: the foo parameter description is very
-      long so we have to split it in multiple lines in order to
-      keep things ordered
-  :returns: return_type -- description of the return value
-  :returns: description of the return value
-  :raises ValueError: if the message_body exceeds 160 characters
-  :raises TypeError: if the message_body is not a basestring
-  """
-
-**DO NOT** leave an extra newline before the closing triple-double-quote.
-
-Creating Unit Tests
--------------------
-NOTE: 100% coverage is required
-
-Logging
--------
-Use __name__ as the name of your logger and name your module-level logger
-objects 'LOG'::
-
-    LOG = logging.getLogger(__name__)
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 68c771a0..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README b/README new file mode 100644 index 00000000..8fcd2b2f --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". 
+ +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index 51737fb0..00000000 --- a/README.rst +++ /dev/null @@ -1,73 +0,0 @@ -======================== -Team and repository tags -======================== - -.. image:: https://governance.openstack.org/badges/zaqar.svg - :target: https://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on - -===== -Zaqar -===== - -Zaqar is a multi-tenant cloud messaging and notification service for web -and mobile developers. -It combines the ideas pioneered by Amazon's SQS product with additional -semantics to support event broadcasting. - -The service features a fully RESTful API, which developers can use to send -messages between various components of their SaaS and mobile applications, by -using a variety of communication patterns. Underlying this API is an efficient -messaging engine designed with scalability and security in mind. - -Other OpenStack components can integrate with Zaqar to surface events to end -users and to communicate with guest agents that run in the "over-cloud" layer. -Cloud operators can leverage Zaqar to provide equivalents of SQS and SNS to -their customers. - -General information is available in the wiki: - - https://wiki.openstack.org/wiki/Zaqar - -The API v2.0 (stable) specification and documentation are available at: - - https://wiki.openstack.org/wiki/Zaqar/specs/api/v2.0 - -Zaqar's documentation, the source of which is in ``doc/source/``, is -available at: - - https://docs.openstack.org/zaqar/latest - -Contributors are encouraged to join IRC (``#openstack-zaqar`` channel on -``irc.freenode.net``): - - https://wiki.openstack.org/wiki/IRC - -Information on how to run unit and functional tests is available at: - - https://docs.openstack.org/zaqar/admin/running_tests.html - -Information on how to run the benchmarking tool is available at: - - https://docs.openstack.org/zaqar/admin/running_benchmark.html - -Using Zaqar ----------- - -If you are new to Zaqar and just want to try it, you can set up Zaqar in -the development environment. - -Using Zaqar in a production environment: - - Coming soon! - -Using Zaqar in a development environment: - - Instructions are available at: - - https://docs.openstack.org/zaqar/contributor/development.environment.html - - This will allow you to run a local Zaqar server with MongoDB as the database. - - This is the easiest and quickest way to get started, and the most suitable for beginners. \ No newline at end of file diff --git a/api-ref/source/claims.inc b/api-ref/source/claims.inc deleted file mode 100644 index 24fe3b25..00000000 --- a/api-ref/source/claims.inc +++ /dev/null @@ -1,218 +0,0 @@ -=============== -Claims (claims) -=============== -A claim is a mechanism to mark messages so that other workers will not process -the same message. - -Claim messages -============== - -.. rest_method:: POST /v2/queues/{queue_name}/claims - -Claims a set of messages from the specified queue. - -This operation claims a set of messages (up to the value of the ``limit`` -parameter) from oldest to newest and skips any messages that are already -claimed. If no unclaimed messages are available, the API returns a -``204 No Content`` message.
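As an illustration of the claim flow described here, the following is a minimal sketch against the v2 claims endpoint using the third-party ``requests`` library; the base URL, queue name, token, and TTL/grace values are placeholders, not output from a real deployment:

.. code-block:: python

    import uuid

    import requests

    BASE = 'http://localhost:8888/v2'           # assumed Zaqar endpoint
    HEADERS = {
        'Client-ID': str(uuid.uuid4()),         # required for message operations
        'X-Auth-Token': '<keystone-token>',     # placeholder credential
    }

    # Claim up to 5 messages, holding them for 300 s with a 60 s grace period.
    resp = requests.post(BASE + '/queues/demo/claims',
                         params={'limit': 5},
                         json={'ttl': 300, 'grace': 60},
                         headers=HEADERS)
    if resp.status_code == 204:
        print('no unclaimed messages available')
    else:
        resp.raise_for_status()                 # expect 201 Created
        for msg in resp.json():
            print(msg['href'], msg['body'])

Note how the worker keeps each claimed message's ``href`` (which embeds the claim ID) so it can delete the message before the claim expires, as the next paragraphs explain.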
- -When a client (worker) finishes processing a message, it should delete the -message before the claim expires to ensure that the message is processed only -once. As part of the delete operation, workers should specify the claim ID -(which is best done by simply using the provided href). If workers perform -these actions, then if a claim simply expires, the server can return an error -and notify the worker of the race condition. This action gives the worker a -chance to roll back its own processing of the given message because another -worker can claim the message and process it. - -The age given for a claim is relative to the server's clock. The claim's age -is useful for determining how quickly messages are getting processed and -whether a given message's claim is about to expire. - -When a claim expires, it is released. If the original worker failed to process -the message, another client worker can then claim the message. - -Note that claim creation is best-effort, meaning the worker may claim and -return fewer than the requested number of messages. - -The ``ttl`` attribute specifies how long the server waits before releasing -the claim. The ttl value must be between 60 and 43200 seconds (12 hours). -You must include a value for this attribute in your request. - -The ``grace`` attribute specifies the message grace period in seconds. The -value of ``grace`` must be between 60 and 43200 seconds (12 hours). -You must include a value for this attribute in your request. To deal with -workers that have stopped responding (for up to 1209600 seconds or 14 days, -including claim lifetime), the server extends the lifetime of claimed messages -to be at least as long as the lifetime of the claim itself, plus the specified -grace period. If a claimed message would normally live longer than the claim's -live period, its expiration is not adjusted. - - - -Normal response codes: 201, 204 - -Error response codes: - -- Unauthorized(401) -- Forbidden(403) -- itemNotFound(404) -- ServiceUnavailable(503) - -Request Parameters ------------------- - - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - limit: claim_limit - - ttl: claim_ttl - - grace: claim_grace - -**Example Claim Messages: JSON request** - - -.. literalinclude:: samples/claim_messages_request.json - :language: javascript - - - -Response Parameters ------------------- - -**Example Claim Messages: JSON response** - - -.. literalinclude:: samples/claim_messages_response.json - :language: javascript - - - -Query Claim -=========== - -.. rest_method:: GET /v2/queues/{queue_name}/claims/{claim_id} - -Queries the specified claim for the specified queue. - -This operation queries the specified claim for the specified queue. Claims -with malformed IDs or claims that are not found by ID are ignored. - - - -Normal response codes: 200 - -Error response codes: - -- Unauthorized(401) -- Forbidden(403) -- itemNotFound(404) -- ServiceUnavailable(503) - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - claim_id: claim_id - -Response Parameters ------------------- - -**Example Query Claim: JSON response** - - -.. literalinclude:: samples/claim_query_response.json - :language: javascript - - - -Update(Renew) Claim -=================== - -.. rest_method:: PATCH /v2/queues/{queue_name}/claims/{claim_id} - -Updates the specified claim for the specified queue. - -This operation updates the specified claim for the specified queue.
Claims -with malformed IDs or claims that are not found by ID are ignored. - -Clients should periodically renew claims during long-running batches of work -to avoid losing a claim while processing a message. The client can renew a -claim by issuing a ``PATCH`` command to a specific claim resource and -including a new TTL for the claim (which can be different from the original -TTL). The server resets the age of the claim and applies the new TTL. - - - -Normal response codes: 204 - -Error response codes: - -- Unauthorized(401) -- Forbidden(403) -- itemNotFound(404) -- ServiceUnavailable(503) - -Request Parameters ------------------- - - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - claim_id: claim_id - - ttl: claim_ttl - - grace: claim_grace - -**Example Update Claim: JSON request** - - -.. literalinclude:: samples/claim_update_request.json - :language: javascript - - -This operation does not return a response body. - - -Delete(Release) Claim -===================== - -.. rest_method:: DELETE /v2/queues/{queue_name}/claims/{claim_id} - -Releases the specified claim for the specified queue. - -This operation immediately releases a claim, making any remaining, undeleted -messages that are associated with the claim available to other workers. Claims -with malformed IDs or claims that are not found by ID are ignored. - -This operation is useful when a worker is performing a graceful shutdown, -fails to process one or more messages, or is taking longer than expected to -process messages, and wants to make the remainder of the messages available -to other workers. - - - -Normal response codes: 204 - -Error response codes: - -- Unauthorized(401) -- Forbidden(403) -- itemNotFound(404) -- ServiceUnavailable(503) - -Request Parameters ------------------- - - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - claim_id: claim_id - - -This operation does not accept a request body and does not return a response -body. diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 0cf8b8d5..00000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,229 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# nova documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import subprocess -import sys - -import openstackdocstheme - -html_theme = 'openstackdocs' -html_theme_path = [openstackdocstheme.get_html_theme_path()] -html_theme_options = { - "sidebar_mode": "toc", -} - -extensions = [ - 'os_api_ref', -] - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here.
If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Messaging Service API Reference' -copyright = u'2010-present, OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -from zaqar.version import version_info -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. -version = version_info.version_string() - -# Config logABug feature -giturl = u'http://git.openstack.org/cgit/openstack/zaqar/tree/api-ref/source' -# source tree -# html_context allows us to pass arbitrary values into the html template -html_context = {'bug_tag': 'api-ref', - 'giturl': giturl, - 'bug_project': 'zaqar'} - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. 
-# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' -git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", - "-n1"] -html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8') - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'zaqardoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Zaqar.tex', u'OpenStack Messaging Service API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/flavors.inc b/api-ref/source/flavors.inc deleted file mode 100644 index d607ff74..00000000 --- a/api-ref/source/flavors.inc +++ /dev/null @@ -1,207 +0,0 @@ -================= -Flavors (flavors) -================= - -Queue flavors allow users to have different types of queues based on the -storage capabilities. By using flavors, it's possible to allow consumers of the -service to choose between durable storage, fast storage, etc. Flavors must be -created by service administrators and they rely on the existence of pools. - -List flavors -============ - -.. rest_method:: GET /v2/flavors - -Lists flavors. 
- -This operation lists flavors for the project. The flavors are sorted -alphabetically by name. - - -Normal response codes: 200 - -Error response codes: - -- Unauthorized (401) -- Forbidden (403) - -Query Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - detailed: detailed - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - flavors: flavors - - links: flavor_links - - -Response Example ----------------- - -.. literalinclude:: samples/flavor-list-response.json - :language: javascript - - -Create flavor -============= - -.. rest_method:: PUT /v2/flavors/{flavor_name} - -Creates a flavor. - -This operation creates a new flavor. - -``flavor_name`` is the name that you give to the flavor. The name must not -exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, -underscores, and hyphens. - - -Normal response codes: 201 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Forbidden (403) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - flavor_name: flavor_name_path - - pool_group: flavor_pool_group - -Request Example ---------------- - -.. literalinclude:: samples/flavor-create-request.json - :language: javascript - - -This operation does not return a response body. - - -Update flavor -============= - -.. rest_method:: PATCH /v2/flavors/{flavor_name} - -Updates a flavor. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Forbidden (403) -- Not Found (404) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - flavor_name: flavor_name_path - - pool_group: flavor_pool_group - - -Request Example ---------------- - -.. literalinclude:: samples/flavor-update-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: samples/flavor-update-response.json - :language: javascript - - -Show flavor details -=================== - -.. rest_method:: GET /v2/flavors/{flavor_name} - -Shows details for a flavor. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Forbidden (403) -- Not Found (404) -- ServiceUnavailable (503) - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - flavor_name: flavor_name_path - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - name: flavor_name - - capabilities: capabilities - - pool_group: flavor_pool_group - - href: flavor_href - -Response Example ----------------- - -.. literalinclude:: samples/flavor-show-response.json - :language: javascript - - -Delete flavor -============= - -.. rest_method:: DELETE /v2/flavors/{flavor_name} - -Deletes the specified flavor. - -This operation immediately deletes a flavor. - -``flavor_name`` is the name that you give to the flavor. The name must not -exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, -underscores, and hyphens. - - -Normal response codes: 204 - -Error response codes: - -- Unauthorized (401) -- Forbidden (403) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - flavor_name: flavor_name_path - -This operation does not accept a request body and does not return a response -body. 
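As a rough sketch of the flavor lifecycle covered in this section (create, show, delete), assuming an admin token and an already-existing pool group named ``fast-pools``; the endpoint and all names below are illustrative:

.. code-block:: python

    import requests

    BASE = 'http://localhost:8888/v2'                # assumed Zaqar endpoint
    HEADERS = {'X-Auth-Token': '<admin-token>'}      # flavors are admin-managed

    # PUT creates the flavor on top of an existing pool group (expect 201).
    requests.put(BASE + '/flavors/gold',
                 json={'pool_group': 'fast-pools'},
                 headers=HEADERS).raise_for_status()

    # GET shows name, pool_group, capabilities, and href (expect 200).
    print(requests.get(BASE + '/flavors/gold', headers=HEADERS).json())

    # DELETE removes the flavor (expect 204).
    requests.delete(BASE + '/flavors/gold', headers=HEADERS).raise_for_status()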
diff --git a/api-ref/source/health.inc b/api-ref/source/health.inc deleted file mode 100644 index fba0ad0f..00000000 --- a/api-ref/source/health.inc +++ /dev/null @@ -1,69 +0,0 @@ -=============== -Health (health) -=============== -With the health API, a user or operator can get a general idea of the status of -the Zaqar server. This information can be used for basic validation, performance -checking, etc. - -Ping -==== - -.. rest_method:: GET /v2/ping - -Simple health check for the end user. - -A request to ping the Zaqar server returns 204 when the server is working, and -returns 503 otherwise. This can be a handy API for an end user to check whether -the messaging service is working. - -Normal response codes: 204 - -Error response codes: - -- ServiceUnavailable (503) - - -This operation does not accept a request body and does not return a response -body. - - -Health -====== - -.. rest_method:: GET /v2/health - -Detailed health check for the cloud operator/admin. - -This is an ``admin only`` API. It returns detailed health information -about the Zaqar server. - -The response body will depend on the storage setting of the Zaqar server. By -default, no pool is created, so the response body will only -contain ``catalog_reachable``. Otherwise, the response body will have -``catalog_reachable`` and the health status for each pool. - -Normal response codes: 200 - -Error response codes: - -- Unauthorized (401) -- Forbidden (403) -- ServiceUnavailable (503) - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - catalog_reachable: catalog_reachable - - storage_reachable: storage_reachable - - operation_status: operation_status - - -Response Example ----------------- - -.. literalinclude:: samples/health-response.json - :language: javascript - diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index 3c3f8739..00000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -:tocdepth: 2 - -================================== -Messaging Service API v2 (CURRENT) -================================== - -.. include:: versions.inc -.. include:: queues.inc -.. include:: messages.inc -.. include:: claims.inc -.. include:: subscription.inc -.. include:: health.inc -.. include:: pools.inc -.. include:: flavors.inc diff --git a/api-ref/source/messages.inc b/api-ref/source/messages.inc deleted file mode 100644 index d26fbac9..00000000 --- a/api-ref/source/messages.inc +++ /dev/null @@ -1,334 +0,0 @@ -=================== -Messages (messages) -=================== -A message is sent through a queue and exists until it is deleted by a recipient -or automatically by the system based on a TTL (time-to-live) value. - -All message-related operations require Client-ID to be included in the headers. -This is to ensure that messages are not echoed back to the client that posted -them unless the client explicitly requests this. - -Post Message -============ - -..
rest_method:: POST /v2/queues/{queue_name}/messages - -Posts the message or messages for the specified queue. - -This operation posts the specified message or messages. - -You can submit up to 10 messages in a single request, but you must always -encapsulate the messages in a collection container (an array in JSON, even -for a single message - without the JSON array, you receive the "Invalid request -body" message). The resulting value of the Location header or response body -might be used to retrieve the created messages for further processing. - -The client specifies only the body and TTL for the message. The server inserts -metadata, such as ID and age. - -The response body contains a list of resource paths that correspond to each -message submitted in the request, in the order of the messages. If a -server-side error occurs during the processing of the submitted messages, a -partial list is returned, the partial attribute is set to true, and the client -can try to post the remaining messages again. If the server cannot enqueue any -messages, the server returns a ``503 Service Unavailable`` error message. - -The ``body`` attribute specifies an arbitrary document that constitutes the -body of the message being sent. - -The following rules apply to the maximum size: - -The maximum size of posted messages is the maximum size of the entire request -document (rather than the sum of the individual message body field values as -it was in earlier releases). On error, the client will now be notified of how -much it exceeded the limit. - -The size is limited to 256 KB, including whitespace. - -The document must be valid JSON. (The Message Queuing service validates it.) - -The ``ttl`` attribute specifies how long the server waits before marking the -message as expired and removing it from the queue. The value of ``ttl`` must -be between 60 and 1209600 seconds (14 days). Note that the server might not -actually delete the message until its age has reached up to (ttl + 60) seconds, -to allow for flexibility in storage implementations. - - -Normal response codes: 201 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - -Request Example ---------------- - -.. literalinclude:: samples/messages-post-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - resources: messages_resources - -Response Example ----------------- - -.. literalinclude:: samples/messages-post-response.json - :language: javascript - - -List Messages -============= - -.. rest_method:: GET /v2/queues/{queue_name}/messages - -List the messages in the specified queue. - -A request to list messages when the queue is not found or when messages are -not found returns 204, instead of 200, because there was no information to -send back. Messages with malformed IDs or messages that are not found by ID -are ignored. - -This operation gets the message or messages in the specified queue. - -Message IDs and markers are opaque strings. Clients should make no assumptions -about their format or length. Furthermore, clients should assume that there is -no relationship between markers and message IDs (that is, one cannot be derived -from the other). This allows for a wide variety of storage driver -implementations. - -Results are ordered by age, oldest message first.
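The marker-based paging just described can be driven as in the following sketch; it assumes the ``requests`` library, and the endpoint, queue name, and token are placeholders. The marker is treated as fully opaque and is simply carried over from each response's ``next`` link:

.. code-block:: python

    import uuid
    from urllib.parse import parse_qs, urlparse

    import requests

    BASE = 'http://localhost:8888/v2'           # assumed Zaqar endpoint
    HEADERS = {'Client-ID': str(uuid.uuid4()),
               'X-Auth-Token': '<keystone-token>'}

    def iter_messages(queue, page_size=10):
        """Yield all messages in ``queue``, oldest first, page by page."""
        params = {'limit': page_size, 'echo': 'true'}
        while True:
            resp = requests.get('%s/queues/%s/messages' % (BASE, queue),
                                params=params, headers=HEADERS)
            if resp.status_code == 204:         # nothing (more) to return
                return
            body = resp.json()
            for msg in body.get('messages', []):
                yield msg
            nxt = [l['href'] for l in body.get('links', [])
                   if l.get('rel') == 'next']
            if not nxt:
                return
            # Carry the opaque marker from the ``next`` link forward.
            marker = parse_qs(urlparse(nxt[0]).query).get('marker')
            if not marker:
                return
            params['marker'] = marker[0]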
- -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - marker: marker - - limit: limit - - echo: echo - - include_claimed: include_claimed - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - messages: messages - - links: links - -Response Example ----------------- - -.. literalinclude:: samples/messages-list-response.json - :language: javascript - - -Get A Set Of Messages By Id -=========================== - -.. rest_method:: GET /v2/queues/{queue_name}/messages?ids={ids} - -Gets a specified set of messages from the specified queue. - -This operation provides a more efficient way to query multiple messages -compared to using a series of individual ``GET`` requests. Note that the list of IDs -cannot exceed 20. If a malformed ID or a nonexistent message ID is provided, -it is ignored, and the remaining messages are returned. - -Unlike the Get Messages operation, a client's own messages are always returned -in this operation. If you use the ids parameter, the echo parameter is not used -and is ignored if it is specified. - -The message attributes are defined as follows: ``href`` is an opaque relative -URI that the client can use to uniquely identify a message resource and -interact with it. ``ttl`` is the TTL that was set on the message when it was -posted. The message expires after (ttl - age) seconds. ``age`` is the number -of seconds relative to the server's clock. ``body`` is the arbitrary document -that was submitted with the original request to post the message. - - - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - ids: ids - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - messages: messages - -Response Example ----------------- - -.. literalinclude:: samples/messages-get-byids-response.json - :language: javascript - - -Delete A Set Of Messages By Id -============================== - -.. rest_method:: DELETE /v2/queues/{queue_name}/messages?ids={ids} - -Provides a bulk delete for messages. - -This operation immediately deletes the specified messages. If any of the -message IDs are malformed or non-existent, they are ignored. The remaining -valid messages are deleted. - - - -Normal response codes: 204 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - ids: ids - - pop: pop - - -This operation does not accept a request body and does not return a response -body. - - -Get A Specific Message -====================== - -.. rest_method:: GET /v2/queues/{queue_name}/messages/{message_id} - -Gets the specified message from the specified queue. - -This operation gets the specified message from the specified queue. - -If the message ID is malformed or nonexistent, no message is returned. - -Message fields are defined as follows: ``href`` is an opaque relative URI that -the client can use to uniquely identify a message resource and interact with -it. ``ttl`` is the TTL that was set on the message when it was posted.
The -message expires after (ttl - age) seconds. ``age`` is the number of seconds -relative to the server's clock. ``body`` is the arbitrary document that was -submitted with the original request to post the message. - - - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - message_id: message_id - -Response Example ----------------- - -.. literalinclude:: samples/messages-get-response.json - :language: javascript - - -Delete A Specific Message -========================= - -.. rest_method:: DELETE /v2/queues/{queue_name}/messages/{message_id} - -Deletes the specified message from the specified queue. - -This operation immediately deletes the specified message. - -The ``claim_id`` parameter specifies that the message is deleted only if it -has the specified claim ID and that claim has not expired. This is -useful for ensuring that only one worker processes any given message. When a -worker's claim expires before it can delete a message that it has processed, -the worker must roll back any actions it took based on that message because -another worker can now claim and process the same message. - -If you do not specify ``claim_id``, but the message is claimed, the operation -fails. You can only delete claimed messages by providing an appropriate -``claim_id``. - - - -Normal response codes: 204 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - message_id: message_id - -This operation does not accept a request body and does not return a response -body. - diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml deleted file mode 100644 index 6146013e..00000000 --- a/api-ref/source/parameters.yaml +++ /dev/null @@ -1,498 +0,0 @@ -#### variables in header ##################################################### - -client_id: - type: UUID - in: header - description: | - A UUID for each client instance. The UUID must be submitted in its - canonical form (for example, 3381af92-2b9e-11e3-b191-71861300734c). The - client generates the Client-ID once. Client-ID persists between restarts - of the client so the client should reuse that same Client-ID. Note: All - message-related operations require the use of ``Client-ID`` in the headers - to ensure that messages are not echoed back to the client that posted - them, unless the client explicitly requests this. - -#### variables in path ####################################################### - -claim_id: - type: string - in: path - required: True - description: | - The ID of the claim. - -flavor_name_path: - type: string - in: path - required: True - description: - The name of the flavor. - -message_id: - type: string - in: path - required: True - description: | - The ID of the message. - -pool_name_path: - type: string - in: path - required: True - description: - The name of the pool. - -queue_name: - type: string - in: path - required: True - description: | - The name of the queue. - -subscription_id_path: - type: string - in: path - required: True - description: | - The ID of the subscription.
- -#### variables in query ###################################################### - -claim_limit: - type: integer - in: query - required: false - description: | - The ``limit`` specifies up to 20 messages (configurable) to claim. If not - specified, limit defaults to 10. Note that claim creation is best-effort, - meaning the server may claim and return fewer than the requested number of - messages. - -detailed: - type: boolean - in: query - required: false - description: | - The ``detailed`` parameter specifies whether to show detailed information - when querying queues, flavors and pools. - -echo: - type: boolean - in: query - required: false - description: - Indicates whether the messages can be echoed back to the client that posted - them. - -ids: - type: list - in: query - required: false - description: | - A list of the message IDs. ``pop`` & ``ids`` parameters are mutually - exclusive. Using them together in a request will result in HTTP 400. - - NOTE: Actually, it's not a real list; it's a string of message IDs - separated by commas, for example: - /messages?ids=578f0055508f153f256f717e,578f0055508f153f256f717f - -include_claimed: - type: boolean - in: query - required: false - description: - Indicates whether the messages list should include the claimed messages. - -limit: - type: integer - in: query - required: false - description: | - Requests a page size of items. Returns a number of items up to a limit - value. Use the ``limit`` parameter to make an initial limited request and - use the ID of the last-seen item from the response as the ``marker`` - parameter value in a subsequent limited request. - -marker: - type: string - in: query - required: false - description: | - The ID of the last-seen item. Use the ``limit`` parameter to make an - initial limited request and use the ID of the last-seen item from the - response as the ``marker`` parameter value in a subsequent limited request. - -pop: - type: integer - in: query - required: false - description: | - The ``pop`` parameter specifies how many messages will be popped from the queue. - ``pop`` & ``ids`` parameters are mutually exclusive. Using them together - in a request will result in HTTP 400. - -#### variables in request #################################################### - -_dead_letter_queue: - type: string - in: body - required: False - description: | - The target queue the message will be moved to when the message can't be processed - successfully after meeting the max claim count. It's not supported to add - queue C as the dead letter queue for queue B where queue B has been set - as a dead letter queue for queue A. There is no default value for this - attribute. If it's not set explicitly, then that means there is no dead - letter queue for the current queue. It is one of the ``reserved attributes`` - of Zaqar queues. - -_dead_letter_queue_messages_ttl: - type: integer - in: body - required: False - description: | - The new TTL setting for messages when moved to the dead letter queue. If it's - not set, the current TTL will be kept. It is one of the ``reserved attributes`` - of Zaqar queues. - -_default_message_ttl: - type: integer - in: body - required: True - description: | - The default TTL of messages defined for a queue, which takes effect for - any messages posted to the queue. So when there is no TTL defined for a - message, the queue's _default_message_ttl will be used. By default, the - value is the same value defined as ``max_message_ttl`` in zaqar.conf. It is - one of the ``reserved attributes`` of Zaqar queues.
The value will be - reverted to the default value after deleting it explicitly. - -_flavor: - type: string - in: body - required: False - description: | - The flavor name which can tell Zaqar which storage pool will be used to - create the queue. It is one of the ``reserved attributes`` of Zaqar - queues. - -_max_claim_count: - type: integer - in: body - required: False - description: | - The maximum number of times the message can be claimed. Generally, - reaching it means the message could not be processed successfully. There is no default - value for this attribute. If it's not set, then that means this feature - won't be enabled for the current queue. It is one of the - ``reserved attributes`` of Zaqar queues. - -_max_messages_post_size: - type: integer - in: body - required: True - description: | - The max post size of messages defined for a queue, which takes effect for - any messages posted to the queue. So the user can define a queue-level - cap for post size, which can't be bigger than the max_messages_post_size - defined in zaqar.conf. It is one of the ``reserved attributes`` of Zaqar - queues. The value will be reverted to the default value after deleting it - explicitly. - -capabilities: - type: list - in: body - description: | - Capabilities describe what this flavor is capable of, based on the storage - capabilities. They are used to inform the final user of those capabilities. - -catalog_reachable: - type: boolean - in: body - required: True - description: | - A boolean value to indicate whether the management (catalog) database is - reachable or not. - -claim_grace: - type: integer - in: body - required: false - description: | - The ``grace`` attribute specifies the message grace period in seconds. The - value of ``grace`` must be between 60 and 43200 seconds (12 hours). - You must include a value for this attribute in your request. To deal with - workers that have stopped responding (for up to 1209600 seconds or 14 days, - including claim lifetime), the server extends the lifetime of claimed - messages to be at least as long as the lifetime of the claim itself, plus - the specified grace period. If a claimed message would normally live longer - than the claim's live period, its expiration is not adjusted. - -claim_ttl: - type: integer - in: body - required: false - description: | - The ``ttl`` attribute specifies how long the server waits before releasing - the claim. The ttl value must be between 60 and 43200 seconds (12 hours). - You must include a value for this attribute in your request. - -flavor_href: - type: string - in: body - description: | - The url of the flavor. - -flavor_links: - type: array - in: body - required: true - description: | - Links related to the flavors. This is a list of dictionaries, each including - keys ``href`` and ``rel``. - -flavor_name: - type: string - in: body - required: true - description: | - The name of the flavor. - -flavor_pool_group: - type: string - in: body - required: true - description: | - The ``pool_group`` attribute specifies the name of the pool group - this flavor sits on top of. - -flavors: - type: list - in: body - description: | - A list of the flavors. - -links: - type: array - in: body - required: true - description: | - Links related to the queues. This is a list of dictionaries, each including - keys ``href`` and ``rel``. - -messages: - type: list - in: body - required: True - description: | - A list of the messages. - -messages_resources: - type: list - in: body - description: | - A list of the URLs to the messages.
- -operation_status: - type: dict - in: body - required: False - description: | - A dict which contains the status for many different actions/operations, - for example, post_messages, delete_messages, delete queue, etc. Each - status is a dict which contains three items: ``seconds``, ``ref`` and - ``succeeded``. ``seconds`` means how long the operation took, and ``succeeded`` - indicates whether the action was successful or not. ``ref`` may contain the - information if ``succeeded`` is False; otherwise it's null. - -pool_group: - type: string - in: body - required: false - description: | - The ``group`` attribute specifies a tag given to more than one pool - to remind the user of the purpose/capabilities of all pools that - fall under that group. - -pool_href: - type: string - in: body - description: | - The url of the pool. - -pool_links: - type: array - in: body - required: true - description: | - Links related to the pools. This is a list of dictionaries, each including - keys ``href`` and ``rel``. - -pool_name: - type: string - in: body - description: | - The name of the pool. - -pool_options: - type: dict - in: body - required: false - description: | - The ``options`` attribute gives storage-specific options used by storage - driver implementations. The value must be a dict and valid key-value pairs come - from the registered options for a given storage backend. - -pool_uri: - type: string - in: body - required: true - description: | - The ``uri`` attribute specifies a connection string compatible with a - storage client (e.g., pymongo) attempting to connect to that pool. - -pool_weight: - type: integer - in: body - required: true - description: | - The ``weight`` attribute specifies the likelihood that this pool will be - selected for the next queue allocation. The value must be an integer - greater than -1. - -pools: - type: list - in: body - description: | - A list of the pools. - -pre_signed_queue_expires: - type: string - in: body - required: True - description: | - The time indicating when the pre-signed URL expires. - -pre_signed_queue_methods: - type: list - in: body - required: True - description: | - A list of HTTP methods. The HTTP method(s) this URL was created for. By - selecting the HTTP method, it’s possible to give either read or read/write - access to a specific resource. - -pre_signed_queue_paths: - type: list - in: body - required: True - description: | - A list of paths the pre-signed queue can support. It could be a set of - ``messages``, ``subscriptions``, ``claims``. - -pre_signed_queue_signature: - type: list - in: body - required: True - description: | - The signature generated when the pre-signed URL is created. It can be - consumed by adding the following HTTP headers: - - URL-Signature: 6a63d63242ebd18c3518871dda6fdcb6273db2672c599bf985469241e9a1c799 - URL-Expires: 2015-05-31T19:00:17Z - -project_id: - type: string - in: body - required: True - description: | - The ID of the current project/tenant. - -queue_metadata: - type: dict - in: body - description: | - Metadata of the queue. - -queues: - type: list - in: body - description: | - A list of the queues. - -resource_types: - type: list - in: body - required: false - description: | - The ``resource_types`` attribute allows the user to purge particular - resources of the queue. - -storage_reachable: - type: boolean - in: body - required: False - description: | - A boolean value to indicate whether the messages (pool) database is - reachable or not.
- -subscriber: - type: string - in: body - required: True - description: | - The ``subscriber`` attribute specifies the destination where the - notification is sent. It has been designed to match the Internet RFC on Relative - Uniform Resource Locators. Zaqar now supports two kinds of subscribers: - http/https and email. The http/https subscriber should start with - ``http/https``. The email subscriber should start with ``mailto``. - -subscription_age: - type: integer - in: body - description: | - How long the subscription has existed. - -subscription_id: - type: string - in: body - description: | - The ID of the subscription. - -subscription_options: - type: dict - in: body - required: false - description: | - The ``options`` attribute specifies the extra metadata for the subscription. - The value must be a dict and can contain any key-value pairs. If the - subscriber is "mailto", the ``options`` can contain ``from`` and - ``subject`` to indicate the email's author and title. - -subscription_source: - type: string - in: body - description: | - The queue name on which the subscription is registered. - -subscription_ttl: - type: integer - in: body - required: false - description: | - The ``ttl`` attribute specifies how long the subscription stays alive. The ttl - value must be greater than 60 seconds. The default value is 3600 seconds. - - -subscriptions: - type: list - in: body - description: | - A list of the subscriptions. - -versions: - type: list - in: body - required: True - description: | - A list of supported major API versions. diff --git a/api-ref/source/pools.inc b/api-ref/source/pools.inc deleted file mode 100644 index e24dd920..00000000 --- a/api-ref/source/pools.inc +++ /dev/null @@ -1,212 +0,0 @@ -=============== -Pools (pools) -=============== -If pooling is enabled, the queuing service uses multiple queue databases in order -to scale horizontally. A pool (queue database) can be added at any time without -stopping the service. Each pool has a weight that is assigned at -creation time but can be changed later. Pooling is done by queue, which -means that all messages for a particular queue can be found in the same -pool (queue database). - -List pools -========== - -.. rest_method:: GET /v2/pools - -Lists pools. - -This operation lists pools for the project. The pools are sorted -alphabetically by name. - - -Normal response codes: 200 - -Error response codes: - -- Not Found (404) -- Unauthorized (401) - -Query Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - detailed: detailed - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - pools: pools - - links: pool_links - - -Response Example ----------------- - -.. literalinclude:: samples/pool-list-response.json - :language: javascript - - -Create pool ============ - -.. rest_method:: PUT /v2/pools/{pool_name} - -Creates a pool. - -This operation creates a new pool. - -``pool_name`` is the name that you give to the pool. The name must not -exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, -underscores, and hyphens. - - -Normal response codes: 201 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Conflict (409) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - pool_name: pool_name_path - - weight: pool_weight - - uri: pool_uri - - group: pool_group - - options: pool_options - -Request Example ---------------- - -..
literalinclude:: samples/pool-create-request.json - :language: javascript - - -This operation does not return a response body. - - -Update pool -============ - -.. rest_method:: PATCH /v2/pools/{pool_name} - -Updates a pool. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - pool_name: pool_name_path - - weight: pool_weight - - uri: pool_uri - - group: pool_group - - options: pool_options - - -Request Example ---------------- - -.. literalinclude:: samples/pool-update-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: samples/pool-update-response.json - :language: javascript - - -Show pool details -================== - -.. rest_method:: GET /v2/pools/{pool_name} - -Shows details for a pool. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - pool_name: pool_name_path - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - name: pool_name - - weight: pool_weight - - uri: pool_uri - - group: pool_group - - href: pool_href - -Response Example ----------------- - -.. literalinclude:: samples/pool-show-response.json - :language: javascript - - -Delete pool -=============== - -.. rest_method:: DELETE /v2/pools/{pool_name} - -Deletes the specified pool. - -This operation immediately deletes a pool. - -``pool_name`` is the name that you give to the pool. The name must not -exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, -underscores, and hyphens. - - -Normal response codes: 204 - -Error response codes: - -- Unauthorized (401) -- Forbidden (403) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - pool_name: pool_name_path - -This operation does not accept a request body and does not return a response -body. diff --git a/api-ref/source/queues.inc b/api-ref/source/queues.inc deleted file mode 100644 index 47ec4986..00000000 --- a/api-ref/source/queues.inc +++ /dev/null @@ -1,366 +0,0 @@ -=============== -Queues (queues) -=============== -A queue is a logical entity that groups messages. Ideally a queue is created -per work type. For example, if you want to compress files, you would create a -queue dedicated to this job. Any application that reads from this queue would -only compress files. - -Nowadays, a queue in Zaqar is more like a topic, and it is created lazily: a -user can post messages to a queue before creating it, and Zaqar will create -the queue/topic automatically. - -List queues -=========== - -.. rest_method:: GET /v2/queues - -Lists queues. - -A request to list queues when you have no queues in your account returns 204, -instead of 200, because there is no information to send back. - -This operation lists queues for the project. The queues are sorted -alphabetically by name. - - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - detailed: detailed - -Response Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - queues: queues - - links: links - - -Response Example ----------------- - -.. literalinclude:: samples/queues-list-response.json - :language: javascript - - -Create queue -============ - -.. rest_method:: PUT /v2/queues/{queue_name} - -Creates a queue. - -This operation creates a new queue. - -The body of the request is empty. - -``queue_name`` is the name that you give to the queue. The name must not -exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, -underscores, and hyphens. - -When creating a queue, the user can specify metadata for it. Currently, Zaqar -supports the following metadata: ``_flavor``, ``_max_claim_count``, -``_dead_letter_queue`` and ``_dead_letter_queue_messages_ttl``. - - -Normal response codes: 201, 204 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - _dead_letter_queue: _dead_letter_queue - - _dead_letter_queue_messages_ttl: _dead_letter_queue_messages_ttl - - _default_message_ttl: _default_message_ttl - - _flavor: _flavor - - _max_claim_count: _max_claim_count - - _max_messages_post_size: _max_messages_post_size - -Request Example ---------------- - -.. literalinclude:: samples/queue-create-request.json - :language: javascript - - -This operation does not return a response body. - - -Update queue -============ - -.. rest_method:: PATCH /v2/queues/{queue_name} - -Updates a queue. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- Conflict (409) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - -When updating a queue, the request body must be a list containing a series of -JSON objects that follow -https://tools.ietf.org/html/draft-ietf-appsawg-json-patch-10. - -.. note:: - - - The "Content-Type" header should be - "application/openstack-messaging-v2.0-json-patch" - - - The ``path`` must start with ``/metadata``; for example, if the key is - ``ttl``, then the path should be ``/metadata/ttl`` - - -Request Example ---------------- - -.. literalinclude:: samples/queue-update-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: samples/queue-update-response.json - :language: javascript - - -Show queue details -================== - -.. rest_method:: GET /v2/queues/{queue_name} - -Shows details for a queue. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - _max_messages_post_size: _max_messages_post_size - - _default_message_ttl: _default_message_ttl - -Response Example ----------------- - -.. literalinclude:: samples/queue-show-response.json - :language: javascript - - -Delete queue -=============== - -.. rest_method:: DELETE /v2/queues/{queue_name} - -Deletes the specified queue. - -This operation immediately deletes a queue and all of its existing messages. - -``queue_name`` is the name that you give to the queue. The name must not -exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, -underscores, and hyphens.
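For illustration, a minimal client-side sketch of this delete call; the endpoint, token, and queue name are hypothetical placeholders for your own deployment::

    import uuid

    import requests

    # Hypothetical endpoint, token and queue name; substitute your own.
    resp = requests.delete(
        "http://localhost:8888/v2/queues/billing-queue",
        headers={"Client-ID": str(uuid.uuid4()),
                 "X-Auth-Token": "<token>"})
    assert resp.status_code == 204  # success returns an empty body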
- - -Normal response codes: 204 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - -This operation does not accept a request body and does not return a response -body. - - -Get queue stats -=============== - -.. rest_method:: GET /v2/queues/{queue_name}/stats - -Returns statistics for the specified queue. - -This operation returns queue statistics, including how many messages are in -the queue, categorized by status. - -If the value of the ``total`` attribute is 0, then ``oldest`` and ``newest`` -message statistics are not included in the response. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - -Response Example ----------------- - -.. literalinclude:: samples/queue-stats-response.json - :language: javascript - - -Pre-signed queue -================ - -.. rest_method:: POST /v2/queues/{queue_name}/share - -Creates a pre-signed URL for a given queue. - -.. note:: - - In the case of pre-signed URLs, the queue cannot be created lazily. This - is to prevent cases where queues are deleted and users still have a valid - URL. This is not a big issue in cases where there is just one pool. - However, if a deployment uses more than one type of pool, the lazily - created queue may end up in an undesired pool, and it would be possible - for an attacker to attempt a DoS on that pool. Therefore, whenever a - pre-signed URL is created, if the queue doesn't exist, it needs to be - created. - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - paths: pre_signed_queue_paths - - methods: pre_signed_queue_methods - - expires: pre_signed_queue_expires - -Request Example ---------------- - -.. literalinclude:: samples/queue-pre-signed-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - project: project_id - - paths: pre_signed_queue_paths - - methods: pre_signed_queue_methods - - expires: pre_signed_queue_expires - - signature: pre_signed_queue_signature - -Response Example ----------------- - -.. literalinclude:: samples/queue-pre-signed-response.json - :language: javascript - - -Purge queue -=========== - -.. rest_method:: POST /v2/queues/{queue_name}/purge - -Purges particular resources of the queue. - -.. note:: - - Zaqar currently supports purging the ``messages`` and ``subscriptions`` - resources from a queue. - -Normal response codes: 204 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - resource_types: resource_types - -Request Example ---------------- - -.. 
literalinclude:: samples/purge-queue-request.json - :language: javascript diff --git a/api-ref/source/samples/claim_messages_request.json b/api-ref/source/samples/claim_messages_request.json deleted file mode 100644 index cbcbb090..00000000 --- a/api-ref/source/samples/claim_messages_request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "ttl": 300, - "grace": 300 -} \ No newline at end of file diff --git a/api-ref/source/samples/claim_messages_response.json b/api-ref/source/samples/claim_messages_response.json deleted file mode 100644 index b6347d40..00000000 --- a/api-ref/source/samples/claim_messages_response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "messages": [ - { - "body": { - "event": "BackupStarted" - }, - "age": 239, - "href": "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357?claim_id=51db7067821e727dc24df754", - "ttl": 300 - } - ] -} diff --git a/api-ref/source/samples/claim_query_response.json b/api-ref/source/samples/claim_query_response.json deleted file mode 100644 index 49fefd71..00000000 --- a/api-ref/source/samples/claim_query_response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "age": 57, - "href": "/v2/queues/demoqueue/claims/51db7067821e727dc24df754", - "messages": [ - { - "body": { - "event": "BackupStarted" - }, - "age": 296, - "href": "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357?claim_id=51db7067821e727dc24df754", - "ttl": 300 - } - ], - "ttl": 300 -} \ No newline at end of file diff --git a/api-ref/source/samples/claim_update_request.json b/api-ref/source/samples/claim_update_request.json deleted file mode 100644 index 4e6292cc..00000000 --- a/api-ref/source/samples/claim_update_request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "ttl": 300, - "grace": 300 -} \ No newline at end of file diff --git a/api-ref/source/samples/flavor-create-request.json b/api-ref/source/samples/flavor-create-request.json deleted file mode 100644 index d2da2d88..00000000 --- a/api-ref/source/samples/flavor-create-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "pool_group": "testgroup" -} \ No newline at end of file diff --git a/api-ref/source/samples/flavor-list-response.json b/api-ref/source/samples/flavor-list-response.json deleted file mode 100644 index f890dd3d..00000000 --- a/api-ref/source/samples/flavor-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "flavors": [ - { - "href": "/v2/flavors/test_flavor1", - "pool_group": "testgroup", - "name": "test_flavor1", - "pool": "testgroup" - }, - { - "href": "/v2/flavors/test_flavor2", - "pool_group": "testgroup", - "name": "test_flavor2", - "pool": "testgroup" - } - ], - "links": [ - { - "href": "/v2/flavors?marker=test_flavor2", - "rel": "next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/flavor-show-response.json b/api-ref/source/samples/flavor-show-response.json deleted file mode 100644 index edba44fd..00000000 --- a/api-ref/source/samples/flavor-show-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "href": "/v2/flavors/testflavor", - "capabilities": [ - "FIFO", - "CLAIMS", - "DURABILITY", - "AOD", - "HIGH_THROUGHPUT" - ], - "pool_group": "testgroup", - "name": "testflavor" -} \ No newline at end of file diff --git a/api-ref/source/samples/flavor-update-request.json b/api-ref/source/samples/flavor-update-request.json deleted file mode 100644 index d2da2d88..00000000 --- a/api-ref/source/samples/flavor-update-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "pool_group": "testgroup" -} \ No newline at end of file diff --git a/api-ref/source/samples/flavor-update-response.json 
b/api-ref/source/samples/flavor-update-response.json deleted file mode 100644 index d8360e02..00000000 --- a/api-ref/source/samples/flavor-update-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "href": "/v2/flavors/testflavor", - "pool_group": "testgroup", - "name": "testflavor", - "capabilities": [ - "FIFO", - "CLAIMS", - "DURABILITY", - "AOD", - "HIGH_THROUGHPUT" - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/health-response.json b/api-ref/source/samples/health-response.json deleted file mode 100644 index 2d50b239..00000000 --- a/api-ref/source/samples/health-response.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "catalog_reachable": true, - "redis": { - "storage_reachable": true, - "operation_status": { - "post_messages": { - "seconds": 0.027673959732055664, - "ref": null, - "succeeded": true - }, - "delete_messages": { - "seconds": 0.0028481483459472656, - "ref": null, - "succeeded": true - }, - "delete_queue": { - "seconds": 0.017709016799926758, - "ref": null, - "succeeded": true - }, - "bulk_delete_messages": { - "seconds": 0.03959178924560547, - "ref": null, - "succeeded": true - }, - "create_queue": { - "seconds": 0.021075963973999023, - "ref": null, - "succeeded": true - }, - "list_messages": { - "seconds": 0.00003504753112792969, - "ref": null, - "succeeded": true - }, - "delete_claim": { - "seconds": 0.0006170272827148438, - "ref": null, - "succeeded": true - }, - "claim_messages": { - "seconds": 0.008388042449951172, - "ref": null, - "succeeded": true - } - } - } -} diff --git a/api-ref/source/samples/messages-get-byids-response.json b/api-ref/source/samples/messages-get-byids-response.json deleted file mode 100644 index 27cde69a..00000000 --- a/api-ref/source/samples/messages-get-byids-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "messages": [ - { - "body": { - "current_bytes": "0", - "event": "BackupProgress", - "total_bytes": "99614720" - }, - "age": 443, - "href": "/v2/queues/beijing/messages/578f0055508f153f256f717f", - "id": "578f0055508f153f256f717f", - "ttl": 3600 - } - ] -} diff --git a/api-ref/source/samples/messages-get-response.json b/api-ref/source/samples/messages-get-response.json deleted file mode 100644 index 8d945943..00000000 --- a/api-ref/source/samples/messages-get-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "body": { - "current_bytes": "0", - "event": "BackupProgress", - "total_bytes": "99614720" - }, - "age": 1110, - "href": "/v2/queues/beijing/messages/578f0055508f153f256f717f", - "id": "578f0055508f153f256f717f", - "ttl": 3600 -} diff --git a/api-ref/source/samples/messages-list-response.json b/api-ref/source/samples/messages-list-response.json deleted file mode 100644 index d1404931..00000000 --- a/api-ref/source/samples/messages-list-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "messages": [ - { - "body": { - "current_bytes": "0", - "event": "BackupProgress", - "total_bytes": "99614720" - }, - "age": 482, - "href": "/v2/queues/beijing/messages/578edfe6508f153f256f717b", - "id": "578edfe6508f153f256f717b", - "ttl": 3600 - }, - { - "body": { - "current_bytes": "0", - "event": "BackupProgress", - "total_bytes": "99614720" - }, - "age": 456, - "href": "/v2/queues/beijing/messages/578ee000508f153f256f717d", - "id": "578ee000508f153f256f717d", - "ttl": 3600 - } - ], - "links": [ - { - "href": "/v2/queues/beijing/messages?marker=17&echo=true", - "rel": "next" - } - ] -} diff --git a/api-ref/source/samples/messages-post-request.json b/api-ref/source/samples/messages-post-request.json deleted file mode 100644 index 
67514026..00000000 --- a/api-ref/source/samples/messages-post-request.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "messages": [ - { - "ttl": 300, - "body": { - "event": "BackupStarted", - "backup_id": "c378813c-3f0b-11e2-ad92-7823d2b0f3ce" - } - }, - { - "body": { - "event": "BackupProgress", - "current_bytes": "0", - "total_bytes": "99614720" - } - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/messages-post-response.json b/api-ref/source/samples/messages-post-response.json deleted file mode 100644 index 8c15e535..00000000 --- a/api-ref/source/samples/messages-post-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "resources": [ - "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357", - "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924358" - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/pool-create-request.json b/api-ref/source/samples/pool-create-request.json deleted file mode 100644 index 5c1ea660..00000000 --- a/api-ref/source/samples/pool-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "weight": 100, - "uri": "mongodb://127.0.0.1:27017", - "options":{ - "max_retry_sleep": 1 - }, - "group": "poolgroup" -} \ No newline at end of file diff --git a/api-ref/source/samples/pool-list-response.json b/api-ref/source/samples/pool-list-response.json deleted file mode 100644 index 572630f6..00000000 --- a/api-ref/source/samples/pool-list-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "pools": [ - { - "href": "/v2/pools/test_pool1", - "group": "poolgroup", - "name": "test_pool1", - "weight": 60, - "uri": "mongodb://192.168.1.10:27017" - }, - { - "href": "/v2/pools/test_pool2", - "group": "poolgroup", - "name": "test_pool2", - "weight": 40, - "uri": "mongodb://192.168.1.20:27017" - } - ], - "links": [ - { - "href": "/v2/pools?marker=test_pool2", - "rel": "next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/pool-show-response.json b/api-ref/source/samples/pool-show-response.json deleted file mode 100644 index f267ebf9..00000000 --- a/api-ref/source/samples/pool-show-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "href": "/v2/pools/test_pool", - "group": "testpoolgroup", - "name": "test_pool", - "weight": 100, - "uri": "mongodb://127.0.0.1:27017" -} \ No newline at end of file diff --git a/api-ref/source/samples/pool-update-request.json b/api-ref/source/samples/pool-update-request.json deleted file mode 100644 index 397be023..00000000 --- a/api-ref/source/samples/pool-update-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "weight": 60, - "uri": "mongodb://127.0.0.1:27017", - "options":{ - "max_retry_sleep": 1 - }, - "group": "newpoolgroup" -} \ No newline at end of file diff --git a/api-ref/source/samples/pool-update-response.json b/api-ref/source/samples/pool-update-response.json deleted file mode 100644 index b3f410d6..00000000 --- a/api-ref/source/samples/pool-update-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "href": "/v2/pools/test_pool", - "group": "newpoolgroup", - "name": "test_pool", - "weight": 60, - "uri": "mongodb://127.0.0.1:27017" -} \ No newline at end of file diff --git a/api-ref/source/samples/purge-queue-request.json b/api-ref/source/samples/purge-queue-request.json deleted file mode 100644 index 8722c822..00000000 --- a/api-ref/source/samples/purge-queue-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "resource_types": ["messages", "subscriptions"] -} \ No newline at end of file diff --git a/api-ref/source/samples/queue-create-request.json 
b/api-ref/source/samples/queue-create-request.json deleted file mode 100644 index 052ea935..00000000 --- a/api-ref/source/samples/queue-create-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "_max_messages_post_size": 262144, - "_default_message_ttl": 3600, - "description": "Queue for international traffic billing." -} \ No newline at end of file diff --git a/api-ref/source/samples/queue-pre-signed-request.json b/api-ref/source/samples/queue-pre-signed-request.json deleted file mode 100644 index 2b56f79d..00000000 --- a/api-ref/source/samples/queue-pre-signed-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "paths": ["messages", "claims", "subscriptions"], - "methods": ["GET", "POST", "PUT", "PATCH"], - "expires": "2016-09-01T00:00:00" -} \ No newline at end of file diff --git a/api-ref/source/samples/queue-pre-signed-response.json b/api-ref/source/samples/queue-pre-signed-response.json deleted file mode 100644 index 400cfecf..00000000 --- a/api-ref/source/samples/queue-pre-signed-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "project": "2887aabf368046a3bb0070f1c0413470", - "paths": [ - "/v2/queues/test/messages", - "/v2/queues/test/claims", - "/v2/queues/test/subscriptions" - ], - "expires": "2016-09-01T00:00:00", - "methods": [ - "GET", - "PATCH", - "POST", - "PUT" - ], - "signature": "6a63d63242ebd18c3518871dda6fdcb6273db2672c599bf985469241e9a1c799" -} diff --git a/api-ref/source/samples/queue-show-response.json b/api-ref/source/samples/queue-show-response.json deleted file mode 100644 index 2dd85e49..00000000 --- a/api-ref/source/samples/queue-show-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "_max_messages_post_size": 262144, - "_default_message_ttl": 3600, - "description": "Queue used for billing." -} \ No newline at end of file diff --git a/api-ref/source/samples/queue-stats-response.json b/api-ref/source/samples/queue-stats-response.json deleted file mode 100644 index 1bb147da..00000000 --- a/api-ref/source/samples/queue-stats-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "messages":{ - "claimed": 10, - "total": 20, - "free": 10 - } -} \ No newline at end of file diff --git a/api-ref/source/samples/queue-update-request.json b/api-ref/source/samples/queue-update-request.json deleted file mode 100644 index 7723199c..00000000 --- a/api-ref/source/samples/queue-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "op": "replace", - "path": "/metadata/max_timeout", - "value": 100 - } -] \ No newline at end of file diff --git a/api-ref/source/samples/queue-update-response.json b/api-ref/source/samples/queue-update-response.json deleted file mode 100644 index 2fbafcf2..00000000 --- a/api-ref/source/samples/queue-update-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "max_timeout": 100 -} \ No newline at end of file diff --git a/api-ref/source/samples/queues-list-response.json b/api-ref/source/samples/queues-list-response.json deleted file mode 100644 index 05b79bbf..00000000 --- a/api-ref/source/samples/queues-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "queues":[ - { - "href":"/v2/queues/beijing", - "name":"beijing" - }, - { - "href":"/v2/queues/london", - "name":"london" - }, - { - "href":"/v2/queues/wellington", - "name":"wellington" - } - ], - "links":[ - { - "href":"/v2/queues?marker=wellington", - "rel":"next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/subscription-create-request-http.json b/api-ref/source/samples/subscription-create-request-http.json deleted file mode 100644 index c18027d3..00000000 --- 
a/api-ref/source/samples/subscription-create-request-http.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "subscriber":"http://10.229.49.117:5679", - "ttl":3600, - "options":{} -} \ No newline at end of file diff --git a/api-ref/source/samples/subscription-create-request-mail.json b/api-ref/source/samples/subscription-create-request-mail.json deleted file mode 100644 index ee52e6d5..00000000 --- a/api-ref/source/samples/subscription-create-request-mail.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "subscriber":"mailto:test@gmail.com", - "ttl":3600, - "options":{ - "from": "Jack", - "subject": "Hello" - } -} \ No newline at end of file diff --git a/api-ref/source/samples/subscription-create-response.json b/api-ref/source/samples/subscription-create-response.json deleted file mode 100644 index 2b343077..00000000 --- a/api-ref/source/samples/subscription-create-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "subscription_id": "57692ab13990b48c644bb7e6" -} \ No newline at end of file diff --git a/api-ref/source/samples/subscription-show-response.json b/api-ref/source/samples/subscription-show-response.json deleted file mode 100644 index 38c2b699..00000000 --- a/api-ref/source/samples/subscription-show-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "age": 1632, - "id": "576b54963990b48c644bb7e7", - "subscriber": "http://10.229.49.117:5679", - "source": "test", - "ttl": 3600, - "options": { - "name": "test" - } -} \ No newline at end of file diff --git a/api-ref/source/samples/subscription-update-request.json b/api-ref/source/samples/subscription-update-request.json deleted file mode 100644 index 7176b4b1..00000000 --- a/api-ref/source/samples/subscription-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "subscriber":"http://10.229.49.117:1234", - "ttl":360, - "options":{ - "name": "test" - } -} \ No newline at end of file diff --git a/api-ref/source/samples/subscriptions-list-response.json b/api-ref/source/samples/subscriptions-list-response.json deleted file mode 100644 index 2f384aae..00000000 --- a/api-ref/source/samples/subscriptions-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "links": [ - { - "href": "/v2/queues/test/subscriptions?marker=57692ab13990b48c644bb7e6", - "rel": "next" - } - ], - "subscriptions": [ - { - "age": 13, - "id": "57692aa63990b48c644bb7e5", - "subscriber": "http://10.229.49.117:5678", - "source": "test", - "ttl": 360, - "options": {} - }, - { - "age": 2, - "id": "57692ab13990b48c644bb7e6", - "subscriber": "http://10.229.49.117:5679", - "source": "test", - "ttl": 360, - "options": {} - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/versions-list-response.json b/api-ref/source/samples/versions-list-response.json deleted file mode 100644 index fcd4a6cc..00000000 --- a/api-ref/source/samples/versions-list-response.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "versions":[ - { - "status":"DEPRECATED", - "updated":"2014-09-11T17:47:05Z", - "media-types":[ - { - "base":"application/json", - "type":"application/vnd.openstack.messaging-v1+json" - } - ], - "id":"1", - "links":[ - { - "href":"/v1/", - "rel":"self" - } - ] - }, - { - "status":"SUPPORTED", - "updated":"2014-09-24T04:06:47Z", - "media-types":[ - { - "base":"application/json", - "type":"application/vnd.openstack.messaging-v1_1+json" - } - ], - "id":"1.1", - "links":[ - { - "href":"/v1.1/", - "rel":"self" - } - ] - }, - { - "status":"CURRENT", - "updated":"2014-09-24T04:06:47Z", - "media-types":[ - { - "base":"application/json", - "type":"application/vnd.openstack.messaging-v2+json" - } - ], 
- "id":"2", - "links":[ - { - "href":"/v2/", - "rel":"self" - } - ] - } - ] -} \ No newline at end of file diff --git a/api-ref/source/subscription.inc b/api-ref/source/subscription.inc deleted file mode 100644 index d357521c..00000000 --- a/api-ref/source/subscription.inc +++ /dev/null @@ -1,229 +0,0 @@ -============================ -Subscriptions(subscriptions) -============================ -Subscriptions are relationships between queue/topic and the targeted -subscribers. After created subscriptions for a particular subscriber, like an -email or a webhook, then when new messages posted to the queue, the subscriber -will be notified automatically. - -List Subscriptions -================== - -.. rest_method:: GET /v2/queues/{queue_name}/subscriptions - -Lists a queue's subscriptions. - -This operation lists subscriptions for a queue. The subscriptions are sorted -alphabetically by name. - - -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - -Query Parameters -~~~~~~~~~~~~~~~~ - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - subscriptions: subscriptions - - links: links - - -Response Example ----------------- - -.. literalinclude:: samples/subscriptions-list-response.json - :language: javascript - - -Create Subscription -=================== - -.. rest_method:: POST /v2/queues/{queue_name}/subscriptions - -Creates a subscription. - -This operation creates a new subscription. - - -Normal response codes: 201 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - subscriber: subscriber - - ttl: subscription_ttl - - options: subscription_options - - -Request Example ---------------- - -.. literalinclude:: samples/subscription-create-request-http.json - :language: javascript - -.. literalinclude:: samples/subscription-create-request-mail.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - subscription_id: subscription_id - - -Response Example ----------------- - -.. literalinclude:: samples/subscription-create-response.json - :language: javascript - - -Update Subscription -=================== - -.. rest_method:: PATCH /v2/queues/{queue_name}/subscriptions/{subscription_id} - -Updates a subscription. - -Normal response codes: 204 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- Not Found (404) -- Conflict (409) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - subscription_id: subscription_id_path - - subscriber: subscriber - - ttl: subscription_ttl - - options: subscription_options - - -Request Example ---------------- - -.. literalinclude:: samples/subscription-update-request.json - :language: javascript - - -This operation does not return a response body. - - -Show Subscription Details -========================= - -.. rest_method:: GET /v2/queues/{queue_name}/subscriptions/{subscription_id} - -Shows details for a subscription. 
- -Normal response codes: 200 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - subscription_id: subscription_id_path - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - age: subscription_age - - id: subscription_id - - subscriber: subscriber - - source: subscription_source - - ttl: subscription_ttl - - options: subscription_options - - -Response Example ----------------- - -.. literalinclude:: samples/subscription-show-response.json - :language: javascript - - -Delete Subscription -=================== - -.. rest_method:: DELETE /v2/queues/{queue_name}/subscriptions/{subscription_id} - -Deletes the specified subscription. - - -Normal response codes: 204 - -Error response codes: - -- BadRequest (400) -- Unauthorized (401) -- ServiceUnavailable (503) - - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - queue_name: queue_name - - subscription_id: subscription_id_path - - -This operation does not accept a request body and does not return a response -body. diff --git a/api-ref/source/versions.inc b/api-ref/source/versions.inc deleted file mode 100644 index b8a67179..00000000 --- a/api-ref/source/versions.inc +++ /dev/null @@ -1,40 +0,0 @@ -============ -API Versions -============ - -The Zaqar API only supports ``major versions`` expressed in request URLs. - - -List major versions -=================== - -.. rest_method:: GET / - -Gets the home document. - -This operation gets the home document. - -The entire API is discoverable from a single starting point, the home document. To explore the entire API, you need to know only this one URI. This document is cacheable. - -The home document lets you write clients by using relational links, so clients do not have to construct their own URLs. You can click through and view the JSON doc in your browser. - -For more information about home documents, see http://tools.ietf.org/html/draft-nottingham-json-home-02. - - -Normal response codes: 300 - -Error response codes: - -- serviceUnavailable (503) - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - versions: versions - -Response Example ----------------- - -.. literalinclude:: samples/versions-list-response.json - :language: javascript diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb7..00000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/bench-requirements.txt b/bench-requirements.txt deleted file mode 100644 index 2026a02f..00000000 --- a/bench-requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -gevent>=1.0.1 -marktime>=0.2.0 -python-zaqarclient>=1.1.0 -os-client-config>=1.13.1 # Apache-2.0 \ No newline at end of file diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index f390e445..00000000 --- a/devstack/README.rst +++ /dev/null @@ -1,15 +0,0 @@ -==================== -Enabling in Devstack -==================== - -1. Download DevStack:: - - $ git clone https://git.openstack.org/openstack-dev/devstack - $ cd devstack - -2. Add the following repo as an external repository:: - - [[local|localrc]] - enable_plugin zaqar https://git.openstack.org/openstack/zaqar - -3. 
Run ``./stack.sh`` diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh deleted file mode 100755 index 82d50f67..00000000 --- a/devstack/gate/gate_hook.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside gate_hook function in devstack gate. - -OVERRIDE_ENABLED_SERVICES="mysql,key,tempest,zaqar-websocket,zaqar-wsgi" - -export DEVSTACK_GATE_ZAQAR=1 -export DEVSTACK_GATE_INSTALL_TESTONLY=1 -export DEVSTACK_GATE_NO_SERVICES=1 -export DEVSTACK_GATE_TEMPEST=0 -export DEVSTACK_GATE_EXERCISES=0 -export DEVSTACK_GATE_TIMEOUT=90 -export KEEP_LOCALRC=1 - -export DEVSTACK_GATE_ZAQAR_TEST_SUITE=$1 -# NOTE(flaper87): Backwards compatibility until `project-config`'s -# patch lands. -export DEVSTACK_GATE_ZAQAR_BACKEND=${2:-$DEVSTACK_GATE_ZAQAR_TEST_SUITE} -if [ "$DEVSTACK_GATE_ZAQAR_BACKEND" == "swift" ]; then - OVERRIDE_ENABLED_SERVICES+=,s-proxy,s-object,s-container,s-account -fi -export DEVSTACK_LOCAL_CONFIG+=$" -ZAQAR_BACKEND=$DEVSTACK_GATE_ZAQAR_BACKEND" -export OVERRIDE_ENABLED_SERVICES - -function run_devstack_gate() { - $BASE/new/devstack-gate/devstack-vm-gate.sh -} - -function run_tempest_tests() { - export DEVSTACK_GATE_TEMPEST=1 - export DEVSTACK_GATE_TEMPEST_NOTESTS=1 - run_devstack_gate - - cd $BASE/new/tempest/ - sudo -E testr init - sudo -E tox -eall-plugin zaqar -} - -function run_zaqarclient_tests() { - run_devstack_gate - cd $BASE/new/python-zaqarclient - - source $BASE/new/devstack/openrc - cat /etc/mongodb.conf - ZAQARCLIENT_AUTH_FUNCTIONAL=1 nosetests tests.functional -} - -case "$DEVSTACK_GATE_ZAQAR_TEST_SUITE" in - tempest) - run_tempest_tests - ;; - zaqarclient) - run_zaqarclient_tests - ;; - *) - # NOTE(flaper87): Eventually, this will error - run_zaqarclient_tests - ;; -esac - diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh deleted file mode 100755 index 2e2f6267..00000000 --- a/devstack/gate/post_test_hook.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside post_test_hook function in devstack gate. 
- -# source $BASE/new/devstack/openrc admin admin - -function generate_test_results { - if [ -f .testrepository/0 ]; then - sudo .tox/py27-gate/bin/testr last --subunit > $WORKSPACE/testrepository.subunit - sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit - sudo .tox/py27-gate/bin/python /usr/local/jenkins/slave_scripts/subunit2html.py $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html - sudo gzip -9 $BASE/logs/testrepository.subunit - sudo gzip -9 $BASE/logs/testr_results.html - sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - fi -} - -set -x - -export ZAQAR_DIR="$BASE/new/zaqar" -sudo chown -R stack:stack $ZAQAR_DIR -cd $ZAQAR_DIR - -# Collect and parse result -generate_test_results -exit $EXIT_CODE diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100755 index cd657c37..00000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,320 +0,0 @@ -#!/bin/bash -# -# lib/zaqar -# Install and start **Zaqar** service - -# To enable a minimal set of Zaqar services, add the following to localrc: -# -# enable_service zaqar-websocket zaqar-wsgi -# -# Dependencies: -# - functions -# - OS_AUTH_URL for auth in api -# - DEST set to the destination directory -# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api -# - STACK_USER service user - -# stack.sh -# --------- -# install_zaqar -# install_zaqarui -# configure_zaqar -# init_zaqar -# start_zaqar -# stop_zaqar -# cleanup_zaqar -# cleanup_zaqar_mongodb - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Functions -# --------- - -# Test if any Zaqar services are enabled -# is_zaqar_enabled -function is_zaqar_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"zaqar" ]] && return 0 - return 1 -} - -# cleanup_zaqar() - Cleans up general things from previous -# runs and storage specific left overs. -function cleanup_zaqar { - if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then - cleanup_zaqar_mongodb - fi -} - -# cleanup_zaqar_mongodb() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_zaqar_mongodb { - if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo zaqar --eval 'db.dropDatabase();'; do sleep 1; done"; then - die $LINENO "Mongo DB did not start" - else - full_version=$(mongo zaqar --eval 'db.dropDatabase();') - mongo_version=`echo $full_version | cut -d' ' -f4` - required_mongo_version='2.2' - if [[ $mongo_version < $required_mongo_version ]]; then - die $LINENO "Zaqar needs Mongo DB version >= 2.2 to run." - fi - fi -} - -# configure_zaqarclient() - Set config files, create data dirs, etc -function configure_zaqarclient { - setup_develop $ZAQARCLIENT_DIR -} - -# configure_zaqar() - Set config files, create data dirs, etc -function configure_zaqar { - setup_develop $ZAQAR_DIR - - [ ! -d $ZAQAR_CONF_DIR ] && sudo mkdir -m 755 -p $ZAQAR_CONF_DIR - sudo chown $USER $ZAQAR_CONF_DIR - - if [[ -f $ZAQAR_DIR/etc/policy.json.sample ]]; then - cp -p $ZAQAR_DIR/etc/policy.json.sample $ZAQAR_POLICY_CONF - fi - - [ ! 
-d $ZAQAR_API_LOG_DIR ] && sudo mkdir -m 755 -p $ZAQAR_API_LOG_DIR - sudo chown $USER $ZAQAR_API_LOG_DIR - - iniset $ZAQAR_CONF DEFAULT debug True - iniset $ZAQAR_CONF DEFAULT unreliable True - iniset $ZAQAR_CONF DEFAULT admin_mode True - iniset $ZAQAR_CONF DEFAULT enable_deprecated_api_versions 1,1.1 - iniset $ZAQAR_CONF signed_url secret_key notreallysecret - - if is_service_enabled key; then - iniset $ZAQAR_CONF DEFAULT auth_strategy keystone - fi - - iniset $ZAQAR_CONF storage message_pipeline zaqar.notification.notifier - - # Enable pooling by default for now - iniset $ZAQAR_CONF DEFAULT admin_mode True - iniset $ZAQAR_CONF 'drivers:transport:websocket' bind $ZAQAR_SERVICE_HOST - iniset $ZAQAR_CONF 'drivers:transport:websocket' port $ZAQAR_WEBSOCKET_PORT - iniset $ZAQAR_CONF drivers transport websocket - - configure_auth_token_middleware $ZAQAR_CONF zaqar $ZAQAR_AUTH_CACHE_DIR - - iniset $ZAQAR_CONF trustee auth_type password - iniset $ZAQAR_CONF trustee auth_url $KEYSTONE_AUTH_URI - iniset $ZAQAR_CONF trustee username $ZAQAR_TRUSTEE_USER - iniset $ZAQAR_CONF trustee password $ZAQAR_TRUSTEE_PASSWORD - iniset $ZAQAR_CONF trustee user_domain_id $ZAQAR_TRUSTEE_DOMAIN - - iniset $ZAQAR_CONF DEFAULT pooling True - iniset $ZAQAR_CONF 'pooling:catalog' enable_virtual_pool True - - # NOTE(flaper87): Configure mongodb regardless so we can use it as a pool - # in tests. - configure_mongodb - - if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then - iniset $ZAQAR_CONF drivers message_store mongodb - iniset $ZAQAR_CONF 'drivers:message_store:mongodb' uri mongodb://localhost:27017/zaqar - iniset $ZAQAR_CONF 'drivers:message_store:mongodb' database zaqar - - iniset $ZAQAR_CONF drivers management_store mongodb - iniset $ZAQAR_CONF 'drivers:management_store:mongodb' uri mongodb://localhost:27017/zaqar_mgmt - iniset $ZAQAR_CONF 'drivers:management_store:mongodb' database zaqar_mgmt - elif [ "$ZAQAR_BACKEND" = 'redis' ] ; then - recreate_database zaqar - iniset $ZAQAR_CONF drivers management_store sqlalchemy - iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' uri `database_connection_url zaqar` - iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' database zaqar_mgmt - - zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head - - iniset $ZAQAR_CONF drivers message_store redis - iniset $ZAQAR_CONF 'drivers:message_store:redis' uri redis://localhost:6379 - iniset $ZAQAR_CONF 'drivers:message_store:redis' database zaqar - configure_redis - elif [ "$ZAQAR_BACKEND" = 'swift' ] ; then - recreate_database zaqar - iniset $ZAQAR_CONF drivers management_store sqlalchemy - iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' uri `database_connection_url zaqar` - iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' database zaqar_mgmt - - zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head - - iniset $ZAQAR_CONF drivers message_store swift - iniset $ZAQAR_CONF 'drivers:message_store:swift' auth_url $KEYSTONE_AUTH_URI_V3 - iniset $ZAQAR_CONF 'drivers:message_store:swift' uri swift://zaqar:$SERVICE_PASSWORD@/service - fi - - if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $ZAQAR_CONF DEFAULT notification_driver messaging - iniset $ZAQAR_CONF DEFAULT control_exchange zaqar - fi - iniset_rpc_backend zaqar $ZAQAR_CONF DEFAULT - - pip_install uwsgi - iniset $ZAQAR_UWSGI_CONF uwsgi http $ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT - iniset $ZAQAR_UWSGI_CONF uwsgi harakiri 60 - iniset $ZAQAR_UWSGI_CONF uwsgi processes 1 - iniset $ZAQAR_UWSGI_CONF uwsgi threads 
4 - iniset $ZAQAR_UWSGI_CONF uwsgi wsgi-file $ZAQAR_DIR/zaqar/transport/wsgi/app.py - iniset $ZAQAR_UWSGI_CONF uwsgi master true - iniset $ZAQAR_UWSGI_CONF uwsgi add-header "Connection: close" - - cleanup_zaqar -} - -function configure_redis { - if is_ubuntu; then - install_package redis-server - pip_install redis - elif is_fedora; then - install_package redis - pip_install redis - else - exit_distro_not_supported "redis installation" - fi -} - -function configure_mongodb { - # Set nssize to 2GB. This increases the number of namespaces supported - # per database. - pip_install pymongo - if is_ubuntu; then - install_package mongodb-server - if ! grep -qF "smallfiles = true" /etc/mongodb.conf; then - echo "smallfiles = true" | sudo tee --append /etc/mongodb.conf > /dev/null - fi - restart_service mongodb - elif is_fedora; then - install_package mongodb - install_package mongodb-server - sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod - restart_service mongod - fi -} - -# init_zaqar() - Initialize etc. -function init_zaqar { - # Create cache dir - sudo mkdir -p $ZAQAR_AUTH_CACHE_DIR - sudo chown $STACK_USER $ZAQAR_AUTH_CACHE_DIR - rm -f $ZAQAR_AUTH_CACHE_DIR/* -} - -# install_zaqar() - Collect source and prepare -function install_zaqar { - setup_develop $ZAQAR_DIR - - if is_service_enabled horizon; then - install_zaqarui - fi -} - -function install_zaqarui { - git_clone $ZAQARUI_REPO $ZAQARUI_DIR $ZAQARUI_BRANCH - # NOTE(flwang): Workaround for devstack bug: 1540328 - # where devstack install 'test-requirements' but should not do it - # for zaqar-ui project as it installs Horizon from url. - # Remove following two 'mv' commands when mentioned bug is fixed. - mv $ZAQARUI_DIR/test-requirements.txt $ZAQARUI_DIR/_test-requirements.txt - setup_develop $ZAQARUI_DIR - mv $ZAQARUI_DIR/_test-requirements.txt $ZAQARUI_DIR/test-requirements.txt - cp -a $ZAQARUI_DIR/zaqar_ui/enabled/* $HORIZON_DIR/openstack_dashboard/local/enabled/ - if [ -d $ZAQARUI_DIR/zaqar-ui/locale ]; then - (cd $ZAQARUI_DIR/zaqar-ui; DJANGO_SETTINGS_MODULE=openstack_dashboard.settings ../manage.py compilemessages) - fi -} - -# install_zaqarclient() - Collect source and prepare -function install_zaqarclient { - git_clone $ZAQARCLIENT_REPO $ZAQARCLIENT_DIR $ZAQARCLIENT_BRANCH - # NOTE(flaper87): Ideally, this should be developed, but apparently - # there's a bug in devstack that skips test-requirements when using - # setup_develop - setup_install $ZAQARCLIENT_DIR -} - -# start_zaqar() - Start running processes, including screen -function start_zaqar { - cat $ZAQAR_UWSGI_CONF - run_process zaqar-wsgi "$ZAQAR_BIN_DIR/uwsgi --ini $ZAQAR_UWSGI_CONF --pidfile2 $ZAQAR_UWSGI_MASTER_PIDFILE" - run_process zaqar-websocket "$ZAQAR_BIN_DIR/zaqar-server --config-file $ZAQAR_CONF" - - echo "Waiting for Zaqar to start..." - token=$(openstack token issue -c id -f value) - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q --header=\"Client-ID:$(uuidgen)\" --header=\"X-Auth-Token:$token\" -O- $ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT/v2/ping; do sleep 1; done"; then - die $LINENO "Zaqar did not start" - fi -} - -# stop_zaqar() - Stop running processes -function stop_zaqar { - local serv - # Kill the zaqar screen windows - for serv in zaqar-wsgi zaqar-websocket; do - screen -S $SCREEN_NAME -p $serv -X kill - done - uwsgi --stop $ZAQAR_UWSGI_MASTER_PIDFILE -} - -function create_zaqar_accounts { - create_service_user "zaqar" - - if [[ "$KEYSTONE_IDENTITY_BACKEND" = 'sql' ]]; then - - local zaqar_service=$(get_or_create_service "zaqar" \ - "messaging" "Zaqar Service") - get_or_create_endpoint $zaqar_service \ - "$REGION_NAME" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" - - local zaqar_ws_service=$(get_or_create_service "zaqar-websocket" \ - "messaging-websocket" "Zaqar Websocket Service") - get_or_create_endpoint $zaqar_ws_service \ - "$REGION_NAME" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_WEBSOCKET_PORT" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_WEBSOCKET_PORT" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_WEBSOCKET_PORT" - fi - - if [ "$ZAQAR_BACKEND" = 'swift' ] ; then - get_or_add_user_project_role ResellerAdmin zaqar service - fi -} - -if is_service_enabled zaqar-websocket || is_service_enabled zaqar-wsgi; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Zaqar" - install_zaqarclient - install_zaqar - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Zaqar" - configure_zaqar - configure_zaqarclient - - if is_service_enabled key; then - create_zaqar_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Zaqar" - init_zaqar - start_zaqar - fi - - if [[ "$1" == "unstack" ]]; then - stop_zaqar - fi -fi - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index fd5f98f6..00000000 --- a/devstack/settings +++ /dev/null @@ -1,48 +0,0 @@ -# Set up default directories -#--------------------------- - -ZAQAR_DIR=$DEST/zaqar -ZAQARCLIENT_DIR=$DEST/python-zaqarclient -ZAQAR_CONF_DIR=/etc/zaqar -ZAQAR_CONF=$ZAQAR_CONF_DIR/zaqar.conf -ZAQAR_POLICY_CONF=$ZAQAR_CONF_DIR/policy.json -ZAQAR_UWSGI_CONF=$ZAQAR_CONF_DIR/uwsgi.conf -ZAQAR_UWSGI_MASTER_PIDFILE=/tmp/uwsgizaqarmasterprocess.pid -ZAQAR_API_LOG_DIR=/var/log/zaqar -ZAQAR_API_LOG_FILE=$ZAQAR_API_LOG_DIR/queues.log -ZAQAR_AUTH_CACHE_DIR=${ZAQAR_AUTH_CACHE_DIR:-/var/cache/zaqar} - -# Support potential entry-points console scripts -ZAQAR_BIN_DIR=$(get_python_exec_prefix) - -# Set up database backend -ZAQAR_BACKEND=${ZAQAR_BACKEND:-mongodb} - -# Set Zaqar repository -ZAQAR_REPO=${ZAQAR_REPO:-${GIT_BASE}/openstack/zaqar.git} -ZAQAR_BRANCH=${ZAQAR_BRANCH:-master} - -# Set client library repository -ZAQARCLIENT_REPO=${ZAQARCLIENT_REPO:-${GIT_BASE}/openstack/python-zaqarclient.git} -ZAQARCLIENT_BRANCH=${ZAQARCLIENT_BRANCH:-master} - -# Set Zaqar UI repository -ZAQARUI_DIR=$DEST/zaqar-ui -ZAQARUI_REPO=${ZAQARUI_REPO:-${GIT_BASE}/openstack/zaqar-ui.git} -ZAQARUI_BRANCH=${ZAQARUI_BRANCH:-$ZAQAR_BRANCH} - -# Set Zaqar Connection Info -ZAQAR_SERVICE_HOST=${ZAQAR_SERVICE_HOST:-$SERVICE_HOST} 
-ZAQAR_SERVICE_PORT=${ZAQAR_SERVICE_PORT:-8888} -ZAQAR_WEBSOCKET_PORT=${ZAQAR_WEBSOCKET_PORT:-9000} -ZAQAR_SERVICE_PROTOCOL=${ZAQAR_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# Set Zaqar trust configuration -ZAQAR_TRUSTEE_USER=${ZAQAR_TRUSTEE_USER:-zaqar} -ZAQAR_TRUSTEE_PASSWORD=${ZAQAR_TRUSTEE_PASSWORD:-$SERVICE_PASSWORD} -ZAQAR_TRUSTEE_DOMAIN=${ZAQAR_TRUSTEE_DOMAIN:-default} - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,zaqar - -enable_service zaqar-websocket zaqar-wsgi diff --git a/devstack/upgrade/resource.sh b/devstack/upgrade/resource.sh deleted file mode 100755 index 7018ba5b..00000000 --- a/devstack/upgrade/resource.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash -# -# Copyright 2017 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -set -o errexit - -source $GRENADE_DIR/grenaderc -source $GRENADE_DIR/functions - -source $TOP_DIR/openrc admin admin - -ZAQAR_DEVSTACK_DIR=$(cd $(dirname "$0")/.. && pwd) -source $ZAQAR_DEVSTACK_DIR/settings - -set -o xtrace - - -function create { - # TODO(flwang): Create queue, create subscriptions, post messages, - # delete queue - : -} - -function verify { - # TODO(flwang): Get queue, get messages, get subscriptions - : -} - -function verify_noapi { - : -} - -function destroy { - # TODO(flwang): Purge queue, delete queue - : -} - -# Dispatcher -case $1 in - "create") - create - ;; - "verify") - verify - ;; - "verify_noapi") - verify_noapi - ;; - "destroy") - destroy - ;; - "force_destroy") - set +o errexit - destroy - ;; -esac - diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings deleted file mode 100644 index 8ef7d450..00000000 --- a/devstack/upgrade/settings +++ /dev/null @@ -1,19 +0,0 @@ -# Grenade needs to know that Zaqar has a Grenade plugin. This is done in the -# gate by setting GRENADE_PLUGINRC when using openstack-infra/devstack-gate. 
-# That means that in the project openstack-infra/project-config we will need to -# update the Zaqar grenade job(s) in jenkins/jobs/devstack-gate.yaml with -# this: -# export GRENADE_PLUGINRC="enable_grenade_plugin zaqar https://git.openstack.org/openstack/zaqar" -# If openstack-infra/project-config is not updated then the Grenade tests will -# never get run for Zaqar - -register_project_for_upgrade zaqar - - -if grep -q 'management_store *= *sqlalchemy' /etc/zaqar/zaqar.conf; then - register_db_to_save zaqar -fi - -devstack_localrc base enable_service zaqar-wsgi zaqar-websocket zaqar - -devstack_localrc target enable_service zaqar-wsgi zaqar-websocket zaqar \ No newline at end of file diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh deleted file mode 100755 index 4df9d615..00000000 --- a/devstack/upgrade/shutdown.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# -# - -set -o errexit - -source $GRENADE_DIR/grenaderc -source $GRENADE_DIR/functions - -# We need base DevStack functions for this -source $BASE_DEVSTACK_DIR/functions -source $BASE_DEVSTACK_DIR/stackrc # needed for status directory -source $BASE_DEVSTACK_DIR/lib/tls - -# Keep track of the DevStack directory -ZAQAR_DEVSTACK_DIR=$(dirname "$0")/.. -source $ZAQAR_DEVSTACK_DIR/settings -source $ZAQAR_DEVSTACK_DIR/plugin.sh - -set -o xtrace - -for serv in zaqar-websocket; do - stop_process $serv -done - -uwsgi --stop $ZAQAR_UWSGI_MASTER_PIDFILE \ No newline at end of file diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh deleted file mode 100755 index 6fc24df8..00000000 --- a/devstack/upgrade/upgrade.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash - -# ``upgrade-zaqar`` - -echo "*********************************************************************" -echo "Begin $0" -echo "*********************************************************************" - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - echo "*********************************************************************" - echo "ERROR: Abort $0" - echo "*********************************************************************" - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM - -# Keep track of the grenade directory -RUN_DIR=$(cd $(dirname "$0") && pwd) - -# Source params -source $GRENADE_DIR/grenaderc - -source $TOP_DIR/openrc admin admin - -# Import common functions -source $GRENADE_DIR/functions - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -if grep -q 'management_store *= *mongodb' /etc/zaqar/zaqar.conf; then - mongodump --db zaqar_mgmt --out $SAVE_DIR/zaqar-mongodb-mgmt-dump.$BASE_RELEASE -fi - -if grep -q 'message_store *= *mongodb' /etc/zaqar/zaqar.conf; then - mongodump --db zaqar --out $SAVE_DIR/zaqar-mongodb-message-dump.$BASE_RELEASE -fi - -if grep -q 'message_store *= *redis' /etc/zaqar/zaqar.conf; then - redis-cli save - cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$BASE_RELEASE.rdb -fi - -# Upgrade Zaqar -# ============= - -# Duplicate some setup bits from target DevStack -source $TARGET_DEVSTACK_DIR/stackrc -source $TARGET_DEVSTACK_DIR/lib/tls - -# Keep track of the DevStack directory -ZAQAR_DEVSTACK_DIR=$(dirname "$0")/.. -source $ZAQAR_DEVSTACK_DIR/settings -source $ZAQAR_DEVSTACK_DIR/plugin.sh - -# Print the commands being run so that we can see the command that triggers -# an error. 
It is also useful for following along as the install occurs. -set -o xtrace - -function wait_for_keystone { - if ! wait_for_service $SERVICE_TIMEOUT ${KEYSTONE_AUTH_URI}/v$IDENTITY_API_VERSION/; then - die $LINENO "keystone did not start" - fi -} - -# Save current config files for posterity -[[ -d $SAVE_DIR/etc.zaqar ]] || cp -pr $ZAQAR_CONF_DIR $SAVE_DIR/etc.zaqar - -stack_install_service zaqar - -if grep -q 'management_store *= *sqlalchemy' /etc/zaqar/zaqar.conf; then - zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head || die $LINENO "DB sync error" -fi - -# calls upgrade-zaqar for specific release -upgrade_project zaqar $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH - -start_zaqar -wait_for_keystone - - -# Don't succeed unless the services come up -ensure_services_started zaqar-server - -if grep -q 'management_store *= *mongodb' /etc/zaqar/zaqar.conf; then - mongodump --db zaqar_mgmt --out $SAVE_DIR/zaqar-mongodb-mgmt-dump.$TARGET_RELEASE -fi - -if grep -q 'message_store *= *mongodb' /etc/zaqar/zaqar.conf; then - mongodump --db zaqar --out $SAVE_DIR/zaqar-mongodb-message-dump.$TARGET_RELEASE -fi - -if grep -q 'message_store *= *redis' /etc/zaqar/zaqar.conf; then - redis-cli save - cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$TARGET_RELEASE.rdb -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End $0" -echo "*********************************************************************" diff --git a/doc/README.md b/doc/README.md deleted file mode 100644 index 79fa9633..00000000 --- a/doc/README.md +++ /dev/null @@ -1,2 +0,0 @@ -Message-Queuing -=============== \ No newline at end of file diff --git a/doc/source/admin/CORS.rst b/doc/source/admin/CORS.rst deleted file mode 100644 index 7e5c4ccb..00000000 --- a/doc/source/admin/CORS.rst +++ /dev/null @@ -1,120 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -========== -CORS Guide -========== - -Zaqar now supports Cross-Origin Resource Sharing (CORS). The function is -provided by oslo.middleware. Please see `Official Doc`_ and `OpenStack Spec`_ -for more detail. This guide mainly tells users how to use it in Zaqar. - - -New Config Options ------------------- - -There are some new config options. - -**allowed_origin** - -Indicate whether this resource may be shared with the domain received in the -request's "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing -slash. Example: https://horizon.example.com. - -**allow_credentials** - -Indicate that the actual request can include user credentials. The default -value is True. - -**expose_headers** - -Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -Headers. The default value is []. - -**max_age** - -Maximum cache age of CORS preflight requests. The default value is 3600. - -**allow_methods** - -Indicate which methods can be used during the actual request. The default value -is ['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'PATCH']. 
-
-
-Request and Response example
-----------------------------
-
-To use CORS, you should make sure that the feature is enabled::
-
-    [cors]
-    enabled = true
-    allowed_origin = http://example
-    allow_methods = GET
-
-The above example config options mean that Zaqar only accepts GET requests
-from the http://example domain. Here are some example requests:
-
-1. Zaqar will do nothing if the request doesn't contain an "Origin" header::
-
-    # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json"
-
-    HTTP/1.1 300 Multiple Choices
-    content-length: 668
-    content-type: application/json; charset=UTF-8
-    Connection: close
-
-2. Zaqar will return nothing in the response headers if the "Origin" is not in
-``allowed_origin``::
-
-    # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://"
-
-    HTTP/1.1 300 Multiple Choices
-    content-length: 668
-    content-type: application/json; charset=UTF-8
-    Connection: close
-
-In the Zaqar log, we can see a message::
-
-    CORS request from origin 'http://' not permitted.
-
-3. Zaqar will return CORS information if the "Origin" header is in
-``allowed_origin``::
-
-    # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://example"
-
-    HTTP/1.1 300 Multiple Choices
-    content-length: 668
-    content-type: application/json; charset=UTF-8
-    Vary: Origin
-    Access-Control-Allow-Origin: http://example
-    Access-Control-Allow-Credentials: true
-    Connection: close
-
-4. Zaqar will return more information if the request doesn't follow Zaqar's
-CORS rules::
-
-    # curl -I -X PUT http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://example"
-
-    HTTP/1.1 405 Method Not Allowed
-    content-length: 0
-    content-type: application/json; charset=UTF-8
-    allow: GET, OPTIONS
-    Vary: Origin
-    Access-Control-Allow-Origin: http://example
-    Access-Control-Allow-Credentials: true
-    Connection: close
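If you prefer to script such checks rather than type curl commands, a small sketch using the python-requests library performs the same comparison. This block is an editorial illustration, not part of the original guide; the endpoint and origins are placeholders borrowed from the examples above.

.. code-block:: python

    # Minimal sketch: check Zaqar's CORS behaviour for a few origins.
    import requests

    ZAQAR_ENDPOINT = 'http://10.229.47.217:8888'  # placeholder endpoint

    for origin in ('http://example', 'http://not-allowed'):
        resp = requests.get(ZAQAR_ENDPOINT,
                            headers={'Accept': 'application/json',
                                     'Origin': origin})
        # Zaqar echoes the origin back only when it is in allowed_origin.
        allowed = resp.headers.get('Access-Control-Allow-Origin')
        print('%s -> Access-Control-Allow-Origin: %s' % (origin, allowed))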
-
-.. _Official Doc: https://docs.openstack.org/developer/oslo.middleware/cors.html
-.. _OpenStack Spec: http://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html
diff --git a/doc/source/admin/OSprofiler.rst b/doc/source/admin/OSprofiler.rst
deleted file mode 100644
index 70d8d196..00000000
--- a/doc/source/admin/OSprofiler.rst
+++ /dev/null
@@ -1,124 +0,0 @@
-..
-    Licensed under the Apache License, Version 2.0 (the "License"); you may
-    not use this file except in compliance with the License. You may obtain
-    a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-    License for the specific language governing permissions and limitations
-    under the License.
-
-================
-OSprofiler Guide
-================
-
-OSprofiler is a library from oslo. It's used for performance analysis. Please
-see the `Official Doc`_ for more detail.
-
-Preparation
------------
-
-OSprofiler now supports several kinds of backends, such as Ceilometer,
-ElasticSearch, Messaging and MongoDB.
-
-.. note:: 1. Ceilometer is only used for data collection, and Messaging is
-   only used for data transfer. So Ceilometer only works when Messaging is
-   enabled.
-   2. ElasticSearch and MongoDB support both data collection and transfer, so
-   they can be used standalone.
-
-In this guide, we take MongoDB as an example.
-
-There are some new config options.
-
-**enabled**
-
-Enables profiling for all services on this node. The default value is False
-(the profiling feature is fully disabled). Profiling may degrade Zaqar's
-performance, so please disable it in production environments.
-
-**connection_string**
-
-Connection string for the notifier backend. The default value is messaging://,
-which sets the notifier to oslo_messaging. Here we set it to
-"mongodb://localhost:27017".
-
-**hmac_keys**
-
-Secret key(s) to use for encrypting context data for performance profiling.
-This string value should have the following format: <key1>[,<key2>,...],
-where each key is some random string. A user who triggers the profiling via
-the REST API has to set one of these keys in the headers of the REST API call
-to include profiling results of this node for this particular project.
-
-**trace_wsgi_transport**, **trace_message_store** and **trace_management_store**
-
-The three layers during a user's request flow. Set to True to enable tracing
-for each layer.
-
-So in this example, we should add the following config options::
-
-    [profiler]
-    enabled = True
-    connection_string = mongodb://localhost:27017
-    hmac_keys = 123
-    trace_wsgi_transport = True
-    trace_message_store = True
-    trace_management_store = True
-
-.. note:: If you want to use MQ and Ceilometer, please leave the
-   **connection_string** empty or set it to the MQ information. And please
-   make sure that the following config options have been set in
-   ceilometer.conf:
-
-::
-
-    [DEFAULT]
-    event_dispatchers = database
-
-    [oslo_messaging_notifications]
-    topics = notifications, profiler
-
-Then restart the Zaqar service.
-
-Command Line
-------------
-
-We can use the OpenStack Client to analyse a user request now. For example,
-if we want to know the performance of "queue list", we can do the following:
-
-1. The OpenStack Client supports OSprofiler by default. The only thing we
-need to do is to add ``--os-profile {hmac_keys}`` to the command::
-
-    openstack queue list --os-profile 123
-
-"123" here is what we set in the Zaqar config file. After the request is done,
-the OpenStack Client will return a trace ID like::
-
-    Trace ID: 2902c7a3-ee18-4b08-aae7-4e34388f9352
-    Display trace with command:
-    osprofiler trace show --html 2902c7a3-ee18-4b08-aae7-4e34388f9352
-
-Now the trace information has already been stored in MongoDB.
-
-2. Use the command from the information returned by the OpenStack Client. The
-osprofiler command uses Ceilometer for data collection by default, so we need
-to use ``--connection-string`` to change it to MongoDB here::
-
-    osprofiler trace show --html 2902c7a3-ee18-4b08-aae7-4e34388f9352 --connection-string mongodb://localhost:27017
-
-Then you can see the analysis information in HTML format.
-
-It also supports JSON format::
-
-    osprofiler trace show --json 2902c7a3-ee18-4b08-aae7-4e34388f9352 --connection-string mongodb://localhost:27017
-
-It can also save the result to a file::
-
-    osprofiler trace show --json 2902c7a3-ee18-4b08-aae7-4e34388f9352 --out list_test --connection-string mongodb://localhost:27017
-
-Then you can open the file "list_test" to get the result.
-
-.. note:: If you used MQ for data transfer, the "--connection-string" here
-   can be omitted or set to your Ceilometer endpoint.
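For reference, the same traced request can be assembled by hand from Python instead of going through the OpenStack Client. The following is a hedged sketch, not part of the original guide: it assumes the osprofiler and requests libraries are installed, the endpoint, token and Client-ID values are placeholders, and the hmac key must match one of the ``hmac_keys`` values in the Zaqar config file.

.. code-block:: python

    # Minimal sketch: send a traced "queue list" request by hand.
    import requests

    from osprofiler import profiler
    from osprofiler import web

    profiler.init(hmac_key='123')  # start a new trace

    headers = web.get_trace_id_headers()  # X-Trace-Info and X-Trace-HMAC
    headers['Client-ID'] = 'de305d54-75b4-431b-adb2-eb6b9e546014'  # placeholder
    headers['X-Auth-Token'] = '440b677561454ea8a7f872201dd4e2c4'   # placeholder

    resp = requests.get('http://localhost:8888/v2/queues', headers=headers)
    print(resp.status_code)
    # Trace ID to pass to `osprofiler trace show`:
    print('Trace ID: %s' % profiler.get().get_base_id())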
-
-.. _Official Doc: http://docs.openstack.org/developer/osprofiler/background.html
diff --git a/doc/source/admin/gmr.rst b/doc/source/admin/gmr.rst
deleted file mode 100644
index 405a3c7f..00000000
--- a/doc/source/admin/gmr.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-..
-    Copyright (c) 2017 OpenStack Foundation
-    All Rights Reserved.
-
-    Licensed under the Apache License, Version 2.0 (the "License"); you may
-    not use this file except in compliance with the License. You may obtain
-    a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-    License for the specific language governing permissions and limitations
-    under the License.
-
-=======================
-Guru Meditation Reports
-=======================
-
-Zaqar contains a mechanism whereby developers and system administrators can
-generate a report about the state of a running Zaqar executable. This report
-is called a *Guru Meditation Report* (*GMR* for short).
-
-Generating a GMR
-----------------
-
-For wsgi and websocket modes, a *GMR* can be generated by sending the *USR2*
-signal to any Zaqar process that supports it (see below). The *GMR* will then
-be output to standard error for that particular process.
-
-For example, suppose that ``zaqar-server`` has process ID ``8675``, and was
-run with ``2>/var/log/zaqar/zaqar-server-err.log``. Then, ``kill -USR2 8675``
-will trigger the Guru Meditation report to be printed to
-``/var/log/zaqar/zaqar-server-err.log``.
-
-For uwsgi mode, users should add a configuration to Zaqar's conf file::
-
-    [oslo_reports]
-    file_event_handler=['The path to a file to watch for changes to trigger '
-                        'the reports, instead of signals. Setting this option '
-                        'disables the signal trigger for the reports.']
-    file_event_handler_interval=['How many seconds to wait between polls when '
-                                 'file_event_handler is set, default value '
-                                 'is 1']
-
-For example, you can specify "file_event_handler=/tmp/guru_report" and
-"file_event_handler_interval=1" in Zaqar's conf file.
-
-A *GMR* can then be generated by "touch"ing the file specified in
-file_event_handler. The *GMR* will then be output to standard error for that
-particular process.
-
-For example, suppose that ``zaqar-server`` was run with
-``2>/var/log/zaqar/zaqar-server-err.log``, and the file path is
-``/tmp/guru_report``. Then, ``touch /tmp/guru_report`` will trigger the Guru
-Meditation report to be printed to ``/var/log/zaqar/zaqar-server-err.log``.
-
-Structure of a GMR
-------------------
-
-The *GMR* is designed to be extensible; any particular executable may add
-its own sections. However, the base *GMR* consists of several sections:
-
-Package
-    Shows information about the package to which this process belongs,
-    including version information
-
-Threads
-    Shows stack traces and thread IDs for each of the threads within this
-    process
-
-Green Threads
-    Shows stack traces for each of the green threads within this process
-    (green threads don't have thread IDs)
-
-Configuration
-    Lists all the configuration options currently accessible via the CONF
-    object for the current process
-
-Extending the GMR
------------------
-
-As mentioned above, additional sections can be added to the GMR for a
-particular executable. For more information, see the inline documentation
-about oslo.reports:
-`oslo.reports <http://docs.openstack.org/developer/oslo.reports/>`_
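As an illustration only (a sketch, not Zaqar's exact wiring), an executable typically enables GMR support through oslo.reports like this; ``version_info`` is the same object the Sphinx configuration in this repository imports:

.. code-block:: python

    # Sketch: enable Guru Meditation Reports in an executable.
    # setup_autorun() installs the USR2 signal handler that dumps the
    # report to stderr on demand.
    from oslo_reports import guru_meditation_report as gmr

    from zaqar.version import version_info


    def main():
        gmr.TextGuruMeditation.setup_autorun(version_info)
        # ... start the transport and storage drivers as usual ...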
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
deleted file mode 100644
index 8f0244ef..00000000
--- a/doc/source/admin/index.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-====================
-Administration Guide
-====================
-
-.. toctree::
-   :maxdepth: 2
-
-   subscription_confirm
-   OSprofiler
-   CORS
-   gmr
-   running_benchmark
-   writing_pipeline_stages
diff --git a/doc/source/admin/running_benchmark.rst b/doc/source/admin/running_benchmark.rst
deleted file mode 100644
index 08ff8c25..00000000
--- a/doc/source/admin/running_benchmark.rst
+++ /dev/null
@@ -1,184 +0,0 @@
-..
-    Licensed under the Apache License, Version 2.0 (the "License"); you may
-    not use this file except in compliance with the License. You may obtain
-    a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-    License for the specific language governing permissions and limitations
-    under the License.
-
-=================
-Running benchmark
-=================
-
-Introduction
-------------
-
-This document describes how to run the benchmarking tool.
-
-Zaqar contributors can use this tool to test how a particular code change
-affects Zaqar's performance.
-
-Usage
------
-
-1. First install and run zaqar-server.
-
-   For example, you can set up Zaqar in a development environment.
-
-   See :doc:`../contributor/development.environment`.
-
-2. In your terminal cd into your local Zaqar repo and install additional
-   requirements:
-
-   .. code-block:: console
-
-      $ pip install -r bench-requirements.txt
-
-3. Copy the configuration file to ~/.zaqar:
-
-   .. code-block:: console
-
-      $ cp etc/zaqar-benchmark.conf.sample ~/.zaqar/zaqar-benchmark.conf
-
-4. In this configuration file specify where zaqar-server can be found:
-
-   .. code-block:: ini
-
-      server_url = http://localhost:8888
-
-5. The benchmarking tool needs a set of messages to work with. Specify the
-   path to the file with messages in the configuration file. Alternatively,
-   put it in the directory with the configuration file and name it
-   ``zaqar-benchmark-messages.json``.
-   As a starting point, you can use the sample file from the etc directory:
-
-   .. code-block:: console
-
-      $ cp etc/zaqar-benchmark-messages.json ~/.zaqar/
-
-   If the file is not found or no file is specified, a single hard-coded
-   message is used for all requests.
-
-6. Run the benchmarking tool using the following command:
-
-   .. code-block:: console
-
-      $ zaqar-bench
-
-   By default, the command will run a performance test for 5 seconds, using
-   one producer process with 10 greenlet workers, and one observer process
-   with 5 workers. The consumer role is disabled by default.
-
-   You can override these defaults in the config file or on the command line
-   using a variety of options. For example, the following command runs a
-   performance test for 30 seconds using 4 producer processes with 20 workers
-   each, plus 4 consumer processes with 20 workers each.
-
-   Note that the observer role is also disabled in this example by setting
-   its number of workers to zero:
-
-   .. code-block:: console
-
-      $ zaqar-bench -pp 4 -pw 20 -cp 4 -cw 20 -ow 0 -t 30
-
-   By default, the results are in human-readable format. For JSON output add
-   the ``--noverbose`` flag. The non-verbose output looks similar to the
-   following:
-
-   .. code-block:: console
-
-      $ zaqar-bench --noverbose
-      Using 'envvars' credentials
-      Using 'keystone' authentication method
-      Benchmarking Zaqar API v2...
-      {"params": {"consumer": {"processes": 1, "workers": 0}, "observer": {"processes": 1, "workers": 5}, "producer": {"processes": 1, "workers": 10}}, "consumer": {"claim_total_requests": 0, "ms_per_claim": 0, "total_reqs": 0, "reqs_per_sec": 0, "successful_reqs": 0, "duration_sec": 0, "ms_per_delete": 0, "messages_processed": 0}, "producer": {"duration_sec": 8.569170951843262, "ms_per_req": 201.715140507139, "total_reqs": 29, "successful_reqs": 29, "reqs_per_sec": 3.384224700729303}, "observer": {"duration_sec": 8.481178045272827, "ms_per_req": 407.40778711107043, "total_reqs": 18, "successful_reqs": 18, "reqs_per_sec": 2.122346672115049}}
-
-   By default, zaqar-bench benchmarks Zaqar API version 2. To run the
-   benchmark against another API version, use the ``-api`` parameter. For
-   example:
-
-   .. code-block:: console
-
-      $ zaqar-bench -api 1.1
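Because the ``--noverbose`` output is a single JSON document, it is easy to post-process. The following is a minimal sketch, not part of the original document; it assumes ``zaqar-bench`` is on ``PATH`` and that the JSON document is the last output line, as in the sample above.

.. code-block:: python

    # Minimal sketch: run zaqar-bench and extract throughput numbers
    # from its JSON (--noverbose) output.
    import json
    import subprocess

    out = subprocess.check_output(['zaqar-bench', '--noverbose']).decode('utf-8')
    results = json.loads(out.splitlines()[-1])  # JSON is on the last line

    for role in ('producer', 'observer', 'consumer'):
        stats = results.get(role, {})
        print('%s: %s reqs/sec' % (role, stats.get('reqs_per_sec', 0)))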
-
-Configuring zaqar-bench to use Keystone authentication
-######################################################
-
-It's possible to use zaqar-bench with Keystone authentication, if your Zaqar
-is configured to use the Keystone authentication method and the Keystone
-service is running. For example, this is always true when running DevStack_
-with an unmodified ``zaqar.conf``.
-
-Let's configure zaqar-bench to use Keystone too:
-
-#. Set zaqar-bench's authentication method to Keystone.
-
-   By default zaqar-bench uses the ``noauth`` method. This can be changed by
-   setting the environment variable ``OS_AUTH_STRATEGY`` to ``keystone``.
-
-   To set this environment variable:
-
-   * temporarily, run:
-
-     .. code-block:: console
-
-        $ export OS_AUTH_STRATEGY=keystone
-
-   * permanently, add this line to your ``~/.bashrc`` file:
-
-     .. code-block:: bash
-
-        export OS_AUTH_STRATEGY=keystone
-
-     Reboot your computer or just run in the terminal where you will start
-     zaqar-bench:
-
-     .. code-block:: console
-
-        $ source ~/.bashrc
-
-#. Set Keystone credentials for zaqar-bench.
-
-   * If you're running Zaqar under DevStack, **you can omit this step**,
-     because zaqar-bench will automatically get administrator or user
-     credentials from one of the files created by DevStack: either from the
-     ``/etc/openstack/clouds.yaml`` file or from the
-     ``~/.config/openstack/clouds.yaml`` file, if it exists.
-
-   * If you're running manually configured Zaqar with manually configured
-     Keystone (not under DevStack):
-
-     Add these lines to your ``~/.bashrc`` file and specify the valid
-     Keystone credentials:
-
-     .. code-block:: bash
-
-        export OS_AUTH_URL="http://<your keystone endpoint>/v2.0"
-        export OS_USERNAME="<username>"
-        export OS_PASSWORD="<password>"
-        export OS_PROJECT_NAME="<project name>"
-
-     Reboot your computer or just run in the terminal where you will start
-     zaqar-bench:
-
-     .. code-block:: console
-
-        $ source ~/.bashrc
-
-#. Run zaqar-bench as usual, for example:
-
-   .. code-block:: console
-
-      $ zaqar-bench
-
-   If everything is properly configured, zaqar-bench should show the line
-   ``Using 'keystone' authentication method`` and execute without
-   authentication errors.
-
-
-.. _DevStack: http://docs.openstack.org/developer/devstack/
diff --git a/doc/source/admin/subscription_confirm.rst b/doc/source/admin/subscription_confirm.rst
deleted file mode 100644
index 54e3160d..00000000
--- a/doc/source/admin/subscription_confirm.rst
+++ /dev/null
@@ -1,298 +0,0 @@
-..
-    Licensed under the Apache License, Version 2.0 (the "License"); you may
-    not use this file except in compliance with the License. You may obtain
-    a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-    License for the specific language governing permissions and limitations
-    under the License.
-
-==============================
-The Subscription Confirm Guide
-==============================
-
-The subscription confirm feature now supports webhook and email subscribers,
-with both the MongoDB and Redis backends.
-This guide shows how to use this feature:
-
-Webhook
--------
-
-.. note::
-
-   You should make sure that message notification is enabled. By default,
-   the ``message_pipeline`` config option in the [storage] section should be
-   set like: message_pipeline = zaqar.notification.notifier
-
-1. Set the config option "require_confirmation" and add the policy to the
-policy.json file. Then restart the zaqar-wsgi service::
-
-    In the config file:
-    [notification]
-    require_confirmation = True
-
-    In the policy.json file:
-    "subscription:confirm": "",
-
-2. Create a subscription.
-
-This example uses zaqar/samples/zaqar/subscriber_service_sample.py as the
-subscriber endpoint, so before this step you should start the subscriber
-service first.
-The service can be started simply with the command::
-
-    python zaqar/samples/zaqar/subscriber_service_sample.py
-
-The service's default port is 5678. If you want to use another port, the
-command will be like::
-
-    python zaqar/samples/zaqar/subscriber_service_sample.py new_port_number
-
-The service will not confirm the subscription automatically by default. If
-you want it to do so, the command will be like::
-
-    python zaqar/samples/zaqar/subscriber_service_sample.py --auto-confirm
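For reference, the core of such a subscriber endpoint is just an HTTP server that accepts Zaqar's POST requests. The following bare-bones Python 3 sketch is an editorial illustration (the bundled sample does more, such as the optional auto-confirmation); it is useful for seeing what arrives during the steps below.

.. code-block:: python

    # Bare-bones subscriber endpoint sketch: prints whatever Zaqar POSTs,
    # including the confirmation request with its pre-signed URL fields.
    import json
    from http.server import BaseHTTPRequestHandler, HTTPServer


    class Subscriber(BaseHTTPRequestHandler):
        def do_POST(self):
            length = int(self.headers.get('Content-Length', 0))
            body = json.loads(self.rfile.read(length))
            # A confirmation request carries Message_Type
            # "SubscriptionConfirmation" plus the URL-Signature, URL-Paths,
            # URL-Expires and URL-Methods values needed for the confirm PUT.
            print(body)
            self.send_response(200)
            self.end_headers()


    HTTPServer(('', 5678), Subscriber).serve_forever()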
-
-Then create a subscription::
-
-    curl -i -X POST http://10.229.47.217:8888/v2/queues/test/subscriptions \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \
-        -d '{"subscriber":"http://10.229.47.217:5678", "ttl":3600, "options":{}}'
-
-The response::
-
-    HTTP/1.1 201 Created
-    content-length: 47
-    content-type: application/json; charset=UTF-8
-    location: http://10.229.47.217:8888/v2/queues/test/subscriptions
-    Connection: close
-    {"subscription_id": "576256b03990b480617b4063"}
-
-At the same time, if the subscriber sample service was not started with
-"--auto-confirm", you will receive a POST request in the subscriber sample
-service. The request is like::
-
-    WARNING:root:{"UnsubscribeBody": {"confirmed": false}, "URL-Methods": "PUT",
-    "X-Project-ID": "51be2c72393e457ebf0a22a668e10a64",
-    "URL-Paths": "/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm",
-    "URL-Expires": "2016-07-06T04:35:56", "queue_name": "test",
-    "SubscribeURL": ["/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm"],
-    "SubscribeBody": {"confirmed": true},
-    "URL-Signature": "d4038a40589cdb61cd13d5a6997472f5be779db441dd8fe0c597a6e465f30c41",
-    "Message": "You have chosen to subscribe to the queue: test",
-    "Message_Type": "SubscriptionConfirmation"}
-    10.229.47.217 - - [06/Jul/2016 11:35:56] "POST / HTTP/1.1" 200 -
-
-If you started the sample service with "--auto-confirm", please go to step 6
-directly, because step 5 will be done by the service automatically.
-
-3. Get the subscription.
-
-The request::
-
-    curl -i -X GET http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063 \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4"
-
-The response::
-
-    HTTP/1.1 200 OK
-    content-length: 154
-    content-type: application/json; charset=UTF-8
-    Connection: close
-    {"confirmed": false, "age": 73, "id": "576256b03990b480617b4063",
-    "subscriber": "http://10.229.47.217:5678", "source": "test", "ttl": 3600, "options": {}}
-
-You can see that the "confirmed" property is false by default.
-
-4. Post a message to the subscription's queue.
-
-The request::
-
-    curl -i -X POST http://10.229.47.217:8888/v2/queues/test/messages \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \
-        -d '{"messages": [{"ttl": 3600,"body": "test123"}]}'
-
-The response::
-
-    HTTP/1.1 201 Created
-    content-length: 68
-    content-type: application/json; charset=UTF-8
-    location: http://10.229.47.217:8888/v2/queues/test/messages?ids=57624dee3990b4634d71bb4a
-    Connection: close
-    {"resources": ["/v2/queues/test/messages/57624dee3990b4634d71bb4a"]}
-
-The subscriber receives nothing, and you will find an info log in
-zaqar-wsgi::
-
-    2016-07-06 11:37:57.929 98400 INFO zaqar.notification.notifier
-    [(None,)2473911afe2642c0b74d7e1200d9bba7 51be2c72393e457ebf0a22a668e10a64 - - -]
-    The subscriber http://10.229.47.217:5678 is not confirmed.
-
-5. Use the information shown in step 2 to confirm the subscription.
-
-The request::
-
-    curl -i -X PUT http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "URL-Methods: PUT" -H "X-Project-ID: 51be2c72393e457ebf0a22a668e10a64" \
-        -H "URL-Signature: d28dced4eabbb09878a73d9a7a651df3a3ce5434fcdb6c3727decf6c7078b282" \
-        -H "URL-Paths: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm" \
-        -H "URL-Expires: 2016-06-16T08:35:12" -d '{"confirmed": true}'
-
-The response::
-
-    HTTP/1.1 204 No Content
-    location: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm
-    Connection: close
-
-6. Repeat step 3 to get the subscription.
-
-The request::
-
-    curl -i -X GET http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063 \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4"
-
-The response::
-
-    HTTP/1.1 200 OK
-    content-length: 155
-    content-type: application/json; charset=UTF-8
-    Connection: close
-    {"confirmed": true, "age": 1370, "id": "576256b03990b480617b4063",
-    "subscriber": "http://10.229.47.217:5678", "source": "test", "ttl": 3600,
-    "options": {}}
-
-The subscription is confirmed now.
-
-7. Repeat step 4 to post a new message.
-
-The request::
-
-    curl -i -X POST http://10.229.47.217:8888/v2/queues/test/messages \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \
-        -d '{"messages": [{"ttl": 3600,"body": "test123"}]}'
-
-The response::
-
-    HTTP/1.1 201 Created
-    content-length: 68
-    content-type: application/json; charset=UTF-8
-    location: http://10.229.47.217:8888/v2/queues/test/messages?ids=5762526d3990b474c80d5483
-    Connection: close
-    {"resources": ["/v2/queues/test/messages/5762526d3990b474c80d5483"]}
-
-Then in the subscriber sample service, you will receive a request::
-
-    WARNING:root:{"body": {"event": "BackupStarted"}, "queue_name": "test",
-    "Message_Type": "Notification", "ttl": 3600}
-    10.229.47.217 - - [06/Jul/2016 13:19:07] "POST / HTTP/1.1" 200 -
-
-8. Unsubscribe.
-
-The request::
-
-    curl -i -X PUT http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "URL-Methods: PUT" -H "X-Project-ID: 51be2c72393e457ebf0a22a668e10a64" \
-        -H "URL-Signature: d28dced4eabbb09878a73d9a7a651df3a3ce5434fcdb6c3727decf6c7078b282" \
-        -H "URL-Paths: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm" \
-        -H "URL-Expires: 2016-06-16T08:35:12" -d '{"confirmed": false}'
-
-The response::
-
-    HTTP/1.1 204 No Content
-    location: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm
-    Connection: close
-
-Then try to post a message. The subscriber will not receive the notification
-any more.
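Putting steps 2 to 7 together, the confirmation round trip can also be scripted. Below is a hedged sketch using the python-requests library; it is not part of the original guide, all URLs, IDs and header values are placeholders copied from the examples above, and the signed values must come from the confirmation POST your subscriber actually received.

.. code-block:: python

    # Sketch: confirm a subscription using the pre-signed URL fields that
    # Zaqar POSTed to the subscriber endpoint (see the WARNING output above).
    import requests

    ZAQAR = 'http://10.229.47.217:8888'  # placeholder endpoint
    CONFIRM_PATH = ('/v2/queues/test/subscriptions/'
                    '576256b03990b480617b4063/confirm')

    signed = {  # copy these from the confirmation POST body
        'URL-Methods': 'PUT',
        'URL-Signature': 'd4038a40589cdb61cd13d5a6997472f5be779db4'
                         '41dd8fe0c597a6e465f30c41',
        'URL-Paths': CONFIRM_PATH,
        'URL-Expires': '2016-07-06T04:35:56',
    }

    headers = {'Content-type': 'application/json',
               'Client-ID': 'de305d54-75b4-431b-adb2-eb6b9e546014',
               'X-Project-ID': '51be2c72393e457ebf0a22a668e10a64'}
    headers.update(signed)

    # {"confirmed": true} confirms; {"confirmed": false} unsubscribes again.
    resp = requests.put(ZAQAR + CONFIRM_PATH, json={'confirmed': True},
                        headers=headers)
    print(resp.status_code)  # 204 on success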
-
-Email
------
-
-1. For email confirmation, you also need to set the config options
-"external_confirmation_url", "subscription_confirmation_email_template" and
-"unsubscribe_confirmation_email_template".
-
-The external_confirmation_url is the confirmation page URL that will be used
-in email subscription confirmation before notification. This page is not
-hosted by the Zaqar server; users should build their own web service to
-provide this web page.
-
-The subscription_confirmation_email_template lets users customize the
-subscription confirmation email content, including topic, body and sender.
-
-The unsubscribe_confirmation_email_template lets users customize the
-unsubscribe confirmation email content, including topic, body and sender,
-too::
-
-    In the config file:
-    [notification]
-    require_confirmation = True
-    external_confirmation_url = http://web_service_url/
-    subscription_confirmation_email_template = topic:Zaqar Notification
-        Subscription Confirmation,\
-        body:'You have chosen to subscribe to the queue: {0}. This queue belongs to project: {1}. To confirm this subscription, click or visit this link below: {2}',\
-        sender:Zaqar Notifications <no-reply@openstack.org>
-    unsubscribe_confirmation_email_template = topic: Zaqar Notification
-        Unsubscribe Confirmation,\
-        body:'You have unsubscribed successfully to the queue: {0}. This queue belongs to project: {1}. To resubscribe this subscription, click or visit this link below: {2}',\
-        sender:Zaqar Notifications <no-reply@openstack.org>
-
-    In the policy.json file:
-    "subscription:confirm": "",
-
-2. Create a subscription.
-
-For email confirmation, you should create a subscription like this::
-
-    curl -i -X POST http://10.229.47.217:8888/v2/queues/test/subscriptions \
-        -H "Content-type: application/json" \
-        -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \
-        -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \
-        -d '{"subscriber":"your email address", "ttl":3600, "options":{}}'
-
-The response::
-
-    HTTP/1.1 201 Created
-    content-length: 47
-    content-type: application/json; charset=UTF-8
-    location: http://10.229.47.217:8888/v2/queues/test/subscriptions
-    Connection: close
-    {"subscription_id": "576256b03990b480617b4063"}
-
-After the subscription is created, Zaqar will send an email to the
-subscriber's email address. The email specifies how to confirm the
-subscription.
-
-3. Click the confirmation page link in the email body.
-
-4. The confirmation page will send the subscription confirmation request to
-the Zaqar server automatically. Users can also choose to unsubscribe by
-clicking the unsubscription link on this page; that will cause Zaqar to
-cancel this subscription and send another email to notify the user of this
-unsubscription action.
-Zaqar provides two examples of such web pages that will help users build
-their own pages::
-
-    zaqar/sample/html/subscriptionConfirmation.html
-    zaqar/sample/html/unsubscriptionConfirmation.html
-
-Users can place these pages on a web server like Apache to access them by
-browser, so the external_confirmation_url will be like this::
-
-    http://127.0.0.1:8080/subscriptionConfirmation.html
-
-For CORS, this example uses zaqar/samples/html/confirmation_web_service_sample.py
-as a simple web service; it will relay the confirmation request to the Zaqar
-server. So before step 3, you should start this web service first.
-The service can be started simply with the command::
-
-    python zaqar/samples/html/confirmation_web_service_sample.py
-
-The service's default port is 5678. If you want to use another port, the
-command will be like::
-
-    python zaqar/samples/html/confirmation_web_service_sample.py new_port_number
diff --git a/doc/source/admin/writing_pipeline_stages.rst b/doc/source/admin/writing_pipeline_stages.rst
deleted file mode 100644
index 46c2c82c..00000000
--- a/doc/source/admin/writing_pipeline_stages.rst
+++ /dev/null
@@ -1,225 +0,0 @@
-========================================
-Writing stages for the storage pipelines
-========================================
-
-Introduction
-~~~~~~~~~~~~
-
-A pipeline is a set of stages needed to process a request. When a new request
-comes to Zaqar, the message first goes through the transport layer pipeline
-and then through one of the storage layer pipelines, depending on the type of
-operation of each particular request. For example, if Zaqar receives a
-request to perform a queue-related operation, the storage layer pipeline will
-be the ``queue pipeline``. Zaqar always has the actual storage controller as
-the final storage layer pipeline stage.
-
-By setting the options in the ``[storage]`` section of ``zaqar.conf``
-you can add additional stages to these storage layer pipelines:
-
-* **Claim pipeline**
-* **Message pipeline** with a built-in stage available to use:
-
-  * ``zaqar.notification.notifier`` - sends notifications to the queue
-    subscribers on each incoming message to the queue, i.e. enables
-    notifications functionality.
-* **Queue pipeline**
-* **Subscription pipeline**
-
-The storage layer pipeline options are empty by default, because additional
-stages can affect the performance of Zaqar. Depending on the stages, the
-sequence in which the option values are listed may or may not matter.
-
-You can add your own external stages to the storage layer pipelines.
-
-Things to know before writing the stage
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Stages in the pipeline must implement the storage controller methods they
-need to hook. You can find all methods available for hooking in the abstract
-classes in ``zaqar/storage/base.py``. For example, if you're looking for all
-methods available to hook in the queue storage layer pipeline, see the
-``Queue`` class in ``zaqar/storage/base.py``. As you can see, Zaqar's
-built-in stage ``zaqar.notification.notifier`` implements the ``post`` method
-of the ``zaqar.storage.base.Message`` abstract class.
-
-A stage can halt the pipeline immediately by returning a value that is not
-None; otherwise, processing will continue to the next stage, ending with the
-actual storage controller.
-
-.. warning::
-
-   In most cases it does not matter what non-None value the storage pipeline
-   returns, but sometimes the returned value is used by the transport layer
-   and you have to be careful. For example, during a queue creation request,
-   if the storage driver returns ``True``, the transport layer responds to
-   the client with the ``201`` HTTP response code; if ``False``, it responds
-   with the ``204`` HTTP response code. See:
-   ``zaqar.transport.wsgi.v2_0.queues.ItemResource#on_put``.
-
-Zaqar finds stages and their source code through the Python entry points
-mechanism. All Python packages containing stages for Zaqar must register
-their stages under the ``zaqar.storage.stages`` entry point group during
-their install, either in ``setup.py`` or in ``setup.cfg``. If the stage is
-registered, and the name of the stage's entry point is specified by the user
-in one of the ``zaqar.conf`` storage layer pipeline options, the stage will
-be loaded to the particular storage layer pipeline.
Zaqar imports stages as plugins. See -``zaqar.storage.pipeline#_get_storage_pipeline``. - -For additional information about plugins see: `Stevedore - Creating Plugins`_ -and `Stevedore - Loading the Plugins`_. - -Example of external stage (written outside Zaqar package) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is an example of small package with a stage that can process queue-related -requests in Zaqar. The stage does not do anything useful, but is good as -example. - -File tree structure of the package: - -.. code-block:: none - - . - ├── setup.py - └── ubershystages - ├── __init__.py - └── queues - ├── __init__.py - └── lovely.py - - 2 directories, 4 files - -``setup.py``: - -.. code-block:: python - - from setuptools import setup, find_packages - - setup( - name='ubershystages', - version='1.0', - - description='Demonstration package for Zaqar with plugin pipeline stage', - - author='Ubershy', - author_email='ubershy@gmail.com', - - url='', - - classifiers=['Development Status :: 3 - Alpha', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Intended Audience :: Developers', - 'Environment :: Console', - ], - - platforms=['Any'], - - scripts=[], - - packages=find_packages(), - include_package_data=True, - - entry_points={ - 'zaqar.storage.stages': [ - 'ubershy.lovelyplugin = ubershystages.queues.lovely:LovelyStage', - ], - }, - - zip_safe=False, - ) - -``lovely.py``: - -.. code-block:: python - - class LovelyStage(object): - """This stage: - 1. Prints 'Lovely stage is processing request...' on each queue creation or - deletion request. - 2. Prints 'Oh, what a lovely day!' on each creation request of a queue - named 'lovely'. - 3. Prevents deletion of a queue named 'lovely' and prints 'Secretly keeping - lovely queue' on such attempt. - """ - - def __init__(self, *args, **kwargs): - print("Lovely stage is loaded!") - - def create(self, name, metadata=None, project=None): - """Stage's method which processes queue creation request. - - :param name: The queue name - :param project: Project id - """ - - self.printprocessing() - if name == 'lovely': - print("Oh, what a lovely day!") - - def delete(self, name, project=None): - """Stage's method which processes queue deletion request. - - :param name: The queue name - :param project: Project id - :returns: Something non-None, if the queue has a name 'lovely'. It will - stop further processing through the other stages of the pipeline, and - the request will not reach the storage controller driver, preventing - queue deletion from the database. - """ - - self.printprocessing() - if name == 'lovely': - print('Secretly keeping lovely queue') - something = "shhh... it's a bad practice" - return something - - def printprocessing(self): - print('Lovely stage is processing request...') - -To install the package to the system in the root directory of the package run: - -.. code-block:: console - - # pip install -e . - -In ``zaqar.conf`` add ``ubershy.lovelyplugin`` to the ``queue_pipeline`` -option: - -.. code-block:: ini - - [storage] - queue_pipeline = ubershy.lovelyplugin - -Start Zaqar: - -.. code-block:: console - - $ zaqar-server - -If the stage has successfully loaded to Zaqar you will see amongst terminal -output lines the ``Lovely stage is loaded!`` line. 
Then you can try to perform -queue create and queue delete operations with the queue 'lovely' and see what -will happen in Zaqar's database. - -.. note:: - - You can hold multiple stages in one package, just be sure that all stages - will be registered as entry points. For example, in the ``setup.py`` you - can register additional ``ubershy.nastyplugin`` stage: - - .. code-block:: python - - entry_points={ - 'zaqar.storage.stages': [ - 'ubershy.lovelyplugin = ubershystages.queues.lovely:LovelyStage', - 'ubershy.nastyplugin = ubershystages.messages.nasty:NastyStage', - ], - }, - -.. _`Stevedore - Creating Plugins`: http://docs.openstack.org/developer/stevedore/tutorial/creating_plugins.html -.. _`Stevedore - Loading the Plugins`: http://docs.openstack.org/developer/stevedore/tutorial/loading.html diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index f23b57bb..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,234 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This file is execfile()d with the current directory set -# to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. -# They can be extensions coming with Sphinx (named 'sphinx.ext.*') -# or your custom ones. - -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.coverage', - 'sphinx.ext.ifconfig', - 'sphinx.ext.graphviz', - 'openstackdocstheme', - ] - -# autodoc generation is a bit aggressive and a nuisance -# when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1" -# in your terminal to disable - -todo_include_todos = True - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'zaqar' -copyright = u'2010-present, OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -from zaqar.version import version_info -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. -version = version_info.version_string() - -# The language for content autogenerated by Sphinx. 
Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -unused_docs = [ - 'api_ext/rst_extension_template', - 'installer', -] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = [] - -# The reST default role (used for this markup: `text`) to use -# for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['zaqar.'] - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_use_modindex = True - -# If false, no index is generated. 
-#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'zaqardoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Zaqar.tex', u'Zaqar Documentation', - u'Anso Labs, LLC', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_use_modindex = True - -# -- Options for openstackdocstheme ------------------------------------------- -repository_name = 'openstack/zaqar' -bug_project = 'zaqar' -bug_tag = '' diff --git a/doc/source/contributor/development.environment.rst b/doc/source/contributor/development.environment.rst deleted file mode 100644 index 52e6d989..00000000 --- a/doc/source/contributor/development.environment.rst +++ /dev/null @@ -1,298 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================================== -Setting up a development environment -==================================== - -This section describes how to setup a working Python development environment -that you can use in developing Zaqar on Ubuntu or Fedora. These instructions -assume that you are familiar with Git. Refer to GettingTheCode_ for -additional information. - -.. _GettingTheCode: http://wiki.openstack.org/GettingTheCode - - -Virtual environments --------------------- - -Use virtualenv_ to track and manage Python dependencies for developing and -testing Zaqar. -Using virtualenv_ enables you to install Python dependencies in an isolated -virtual environment, instead of installing the packages at the system level. - -.. _virtualenv: http://pypi.python.org/pypi/virtualenv - -.. note:: - - Virtualenv is useful for development purposes, but is not typically used for - full integration testing or production usage. 
If you want to learn about - production best practices, check out the `OpenStack Operations Guide`_. - - .. _`OpenStack Operations Guide`: http://docs.openstack.org/ops/ - -Install GNU/Linux system dependencies -##################################### - -.. note:: - - This section is tested for Zaqar on Ubuntu 14.04 (Trusty) and Fedora-based - (RHEL 6.1) distributions. Feel free to add notes and change according to your - experiences or operating system. Learn more about contributing to Zaqar - documentation in the :doc:`welcome` manual. - -Install the prerequisite packages. - -On Ubuntu: - -.. code-block:: console - - $ sudo apt-get install gcc python-pip libxml2-dev libxslt1-dev python-dev zlib1g-dev - -On Fedora-based distributions (e.g., Fedora/RHEL/CentOS): - -.. code-block:: console - - $ sudo yum install gcc python-pip libxml2-devel libxslt-devel python-devel - -Install MongoDB -############### - -You also need to have MongoDB_ installed and running. - -.. _MongoDB: http://www.mongodb.org - -On Ubuntu, follow the instructions in the -`MongoDB on Ubuntu Installation Guide`_. - -.. _`MongoDB on Ubuntu installation guide`: http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ - -On Fedora-based distributions, follow the instructions in the -`MongoDB on Red Hat Enterprise, CentOS, Fedora, or Amazon Linux Installation Guide`_. - -.. _`MongoDB on Red Hat Enterprise, CentOS, Fedora, or Amazon Linux installation guide`: http://docs.mongodb.org/manual/tutorial/install-mongodb-on-red-hat-centos-or-fedora-linux/ - -.. note:: - - If you are Contributor and plan to run Unit tests on Zaqar, you may want to - add this line to mongodb configuration file (``etc/mongod.conf`` or - ``etc/mongodb.conf`` depending on distribution): - - .. code-block:: ini - - smallfiles = true - - Many Zaqar's Unit tests do not clean up their testing databases after - executing. And database files consume much disk space even if they do not - contain any records. This behavior will be fixed soon. - -Getting the code -################ - -Get the code from git.openstack.org to create a local repository with Zaqar: - -.. code-block:: console - - $ git clone https://git.openstack.org/openstack/zaqar.git - -Configuration -############# - -#. From your home folder create the ``~/.zaqar`` folder. This directory holds - the configuration files for Zaqar: - - .. code-block:: console - - $ mkdir ~/.zaqar - -#. Generate the sample configuration file ``zaqar/etc/zaqar.conf.sample``: - - .. code-block:: console - - $ pip install tox - $ cd zaqar - $ tox -e genconfig - -#. Copy the Zaqar configuration samples to the directory ``~/.zaqar/``: - - .. code-block:: console - - $ cp etc/zaqar.conf.sample ~/.zaqar/zaqar.conf - $ cp etc/logging.conf.sample ~/.zaqar/logging.conf - -#. Find the ``[drivers]`` section in ``~/.zaqar/zaqar.conf`` and specify - ``mongodb`` as the message store: - - .. code-block:: ini - - message_store = mongodb - management_store = mongodb - -#. Then find ``[drivers:message_store:mongodb]`` and - ``[drivers:management_store:mongodb]`` sections and specify the - :samp:`{URI}` to point to your local mongodb instance by adding this line - to both the sections: - - .. code-block:: ini - - uri = mongodb://$MONGODB_HOST:$MONGODB_PORT - - By default you will have: - - .. code-block:: ini - - uri = mongodb://127.0.0.1:27017 - - This :samp:`{URI}` points to single mongodb node which of course is not - reliable, so you need to set in the ``[default]`` section of configuration - file: - - .. 
code-block:: ini - - unreliable = True - - For your reference, you can omit this parameter or set it to False only - if the provided :samp:`{URI}` to your mongodb is actually the URI to mongodb - Replica Set or Mongos. Also it must have "Write concern" parameter set to - ``majority`` or to a number more than ``1``. - - For example, :samp:`{URI}` to reliable mongodb can look like this: - - .. code-block:: ini - - uri = mongodb://mydb0,mydb1,mydb2:27017/?replicaSet=foo&w=2 - - Where ``mydb0``, ``mydb1``, ``mydb2`` are addresses of the configured - mongodb Replica Set nodes, ``replicaSet`` (Replica Set name) parameter is - set to ``foo``, ``w`` (Write concern) parameter is set to ``2``. - -#. For logging, find the ``[handler_file]`` section in - ``~/.zaqar/logging.conf`` and modify as desired: - - .. code-block:: ini - - args=('zaqar.log', 'w') - -Installing and using virtualenv -############################### - -#. Install virtualenv by running: - - .. code-block:: console - - $ pip install virtualenv - -#. Create and activate a virtual environment: - - .. code-block:: console - - $ virtualenv zaqarenv - $ source zaqarenv/bin/activate - -#. Install Zaqar: - - .. code-block:: console - - $ pip install -e . - -#. Install the required Python binding for MongoDB: - - .. code-block:: console - - $ pip install pymongo - -#. Start Zaqar server in ``info`` logging mode: - - .. code-block:: console - - $ zaqar-server -v - - Or you can start Zaqar server in ``debug`` logging mode: - - .. code-block:: console - - $ zaqar-server -d - -#. Verify Zaqar is running by creating a queue via curl. In a separate - terminal run: - - .. code-block:: console - - $ curl -i -X PUT http://localhost:8888/v1/queues/samplequeue -H "Content-type: application/json" - -#. Get ready to code! - -.. note:: - - You can run the Zaqar server in the background by passing the - ``--daemon`` flag: - - .. code-block:: console - - $ zaqar-server -v --daemon - - But with this method you will not get immediate visual feedback and it will - be harder to kill and restart the process. - -Troubleshooting -^^^^^^^^^^^^^^^ - -No handlers found for zaqar.client (...) -"""""""""""""""""""""""""""""""""""""""" - -This happens because the current user cannot create the log file (for the -default configuration in ``/var/log/zaqar/server.log``). To solve it, create -the folder: - -.. code-block:: console - - $ sudo mkdir /var/log/zaqar - -Create the file: - -.. code-block:: console - - $ sudo touch /var/log/zaqar/server.log - -And try running the server again. - -DevStack --------- - -If you want to use Zaqar in an integrated OpenStack developing environment, you -can add it to your DevStack_ deployment. - -To do this, you first need to add the following setting to your ``local.conf``: - -.. code-block:: bash - - enable_plugin zaqar https://git.openstack.org/openstack/zaqar - -Then run the ``stack.sh`` script as usual. - -.. _DevStack: http://docs.openstack.org/developer/devstack/ - -Running tests -------------- - -See :doc:`running_tests` for details. - -Running the benchmarking tool ------------------------------ - -See :doc:`../admin/running_benchmark` for details. - -Contributing your work ----------------------- - -See :doc:`welcome` and :doc:`first_patch` for details. diff --git a/doc/source/contributor/first_patch.rst b/doc/source/contributor/first_patch.rst deleted file mode 100644 index f08aef48..00000000 --- a/doc/source/contributor/first_patch.rst +++ /dev/null @@ -1,320 +0,0 @@ -.. 
- Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================ -Your first patch -================ - -This section describes how to create your first patch and upload it to -Gerrit_ for reviewing. - - -Create your contributor accounts and set up your code environment ------------------------------------------------------------------ - -Accounts setup -############## - -You will need to create a Launchpad_ account to login to the Gerrit_ review -system dashboard. -This is also useful for automatically crediting bug fixes to you when you -address them with your code commits. You will also have to sign the -`Contributors License Agreement`_ and `join the OpenStack Foundation`_. -It is a good idea to use the same email all of these accounts to -avoid hooks errors. - -Visit the `Gerrit Workflow's account setup`_ section in the wiki to get -more information on setting up your accounts. - -.. _Launchpad: http://launchpad.net/ -.. _Gerrit: http://review.openstack.org/ -.. _`Contributors License Agreement`: http://docs.openstack.org/infra/manual/developers.html#account-setup -.. _`join the OpenStack Foundation`: http://openstack.org/join -.. _`Gerrit Workflow's account setup`: http://docs.openstack.org/infra/manual/developers.html#account-setup - -SSH setup -######### - -You are going to need to create and upload an SSH key to Gerrit to be able to -commit changes for review. To create an SSH key: - -.. code-block:: console - - $ ssh-keygen –t rsa - -You can optionally enter a password to enhance security. - -View and copy your SSH key: - -.. code-block:: console - - $ less ~/.ssh/id_rsa.pub - -Now you can `upload the SSH key to Gerrit`_. - -.. _`upload the SSH key to Gerrit`: https://review.openstack.org/#/settings/ssh-keys - -Git Review installation -####################### - -Before you start working, make sure you have ``git-review`` installed on your -system. - -You can install it with the following command: - -.. code-block:: console - - $ pip install git-review - -``Git-review`` checks if you can authenticate to Gerrit with your SSH key. -It will ask you for your username. You can configure your Gerrit username so -you don't have to keep re-entering it every time you want to use -``git-review``: - -.. code-block:: console - - $ git config --global gitreview.username yourgerritusername - -You can also save some time by entering your email and your name: - -.. code-block:: console - - $ git config --global gitreview.email "yourgerritemail" - $ git config --global gitreview.name "Firstname Lastname" - -You can view your Gerrit user name in the `settings page`_. - -.. _`settings page`: https://review.openstack.org/#/settings/ - -Project setup -############# - -Clone the Zaqar repository with the following git command: - -.. code-block:: console - - $ git clone git://git.openstack.org/openstack/zaqar.git - -For information on how to set up the Zaqar development environment -see :doc:`development.environment`. 
 - -Before writing code, you will have to do some configuration to connect your -local repository with Gerrit. You will only need to do this the first time you -set up the development environment. - -You can set ``git-review`` to configure the project and install the Gerrit -change-id commit hook with the following command: - - .. code-block:: console - - $ cd zaqar - $ git review -s - -If you get the error "We don't know where your Gerrit is", you will need to add -a new git remote. The URL should be in the error message. Copy that and create -the new remote. It looks something like: - - .. code-block:: console - - $ git remote add gerrit ssh://<username>@review.openstack.org:29418/openstack/zaqar.git - -In the project directory you have a hidden ``.git`` directory and a -``.gitreview`` file. You can view them with the following command: - - .. code-block:: console - - $ ls -la - -Making a patch --------------- - -Pick or report a bug -#################### - -You can start tackling some bugs from the `bugs list in Launchpad`_. -If you find a bug you want to work on, assign it to yourself. Make sure to read -the bug report. If you need more information, ask the reporter to provide more -details through a comment on Launchpad or through IRC or email. - -If you find a new bug, look through Launchpad to see if it has been reported. If -it hasn't, report the bug and ask another developer to confirm it. You can -start working on it once another developer confirms the bug. - -Here are some details you might want to include when filling out a bug report: - -* The release, or milestone, or commit ID corresponding to the software that - you are running -* The operating system and version where you've identified the bug -* Steps to reproduce the bug, including what went wrong -* A description of the expected results, as opposed to what you actually saw -* Relevant excerpts from your log files; include only the portions that matter - -In the bug comments, you can contribute instructions on how to fix a given bug, -and set the status to "Triaged". - -You can read more about `Launchpad bugs`_ in the wiki. - -.. _`bugs list in Launchpad`: https://bugs.launchpad.net/zaqar -.. _`Launchpad bugs`: https://wiki.openstack.org/wiki/Bugs - -Workflow -######## - -Make sure your repo is up to date. You can update it with the following git -commands: - - .. code-block:: console - - $ git remote update - $ git checkout master - $ git pull --ff-only origin master - -Create a topic branch. You can create one with the following git command: - - .. code-block:: console - - $ git checkout -b TOPIC-BRANCH - -If you are working on a blueprint, name your :samp:`{TOPIC-BRANCH}` -``bp/BLUEPRINT`` where :samp:`{BLUEPRINT}` is the name of a blueprint in -Launchpad (for example, "bp/authentication"). The general convention when -working on bugs is to name the branch ``bug/BUG-NUMBER`` (for example, -"bug/1234567"). - -Read more about the commit syntax in the `Gerrit workflow`_ wiki. - -.. _`Gerrit workflow`: http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Common problems -^^^^^^^^^^^^^^^ - -#. You realized that you were working in master and you haven't made any - commits. Solution: - - .. code-block:: console - - $ git checkout -b newbranch - $ git commit -a -m "Edited" - - If you already created the branch, omit the ``-b``. - - All your changes are now in :samp:`{newbranch}`. Problem solved. - -#. You realized that you were working in master and you have made commits to - master. Solution: - - ..
code-block:: console - - $ git branch newbranch - $ git reset --hard HEAD~x - $ git checkout newbranch - - Where ``x`` is the number of commits you have made to master. - And remember, you will lose any uncommitted work. - - Your commits are now in :samp:`{newbranch}`. Problem solved. - -#. You made multiple commits and realized that Gerrit requires one commit per - patch. Solution: - - * You need to squash your previous commits. Make sure you are in your - branch and follow the `squashing guide`_. Then fill in the commit message - properly. - - You squashed your commits. Problem solved. - -Design principles -################# - -Zaqar lives by the following design principles: - -* `DRY`_ -* `YAGNI`_ -* `KISS`_ - -.. _`DRY`: https://en.wikipedia.org/wiki/Don%27t_repeat_yourself -.. _`YAGNI`: https://en.wikipedia.org/wiki/YAGNI -.. _`KISS`: https://en.wikipedia.org/wiki/KISS_principle - -Try to stick to these design principles when working on your patch. - -Test your code -############## - -It is important to test your code and follow the Python code style guidelines. -See :doc:`running_tests` for details on testing. - -Submitting a patch ------------------- - -Once you have finished coding your fix, add and commit your final changes. -Your commit message should: - -* Provide a brief description of the change in the first line. -* Insert a single blank line after the first line. -* Provide a detailed description of the change in the following lines, - breaking paragraphs where needed. -* Limit the first line to 50 characters, and do not end it with a - period. -* Wrap subsequent lines at 72 characters. -* Put the 'Change-Id', 'Closes-Bug #NNNNN' and 'blueprint NNNNNNNNNNN' - lines at the very end. - -Read more about `making a good commit message`_. - -To submit it for review, use the following git command: - -.. code-block:: console - - $ git review - -You will see the URL of your review page once it is successfully sent. - -You can also see your reviews in :guilabel:`My Changes` in Gerrit. The first -thing to watch for is a ``+1`` in the :guilabel:`Verified` column next to your -patch in the server and/or client list of pending patches. - -If the "Jenkins" user gives you a ``-1``, you'll need to check the log it posts -to find out what gate test failed, update your patch, and resubmit. - -You can set your patch as a :guilabel:`work in progress` if your patch is -not ready to be merged, but you would still like some feedback from other -developers. To do this, leave a review on your patch setting -:guilabel:`Workflow` to ``-1``. - -Once the gate has verified your patch, other Zaqar developers will take a look -and submit their comments. When you get two or more ``+2``'s from core -reviewers, the patch will be approved and merged. - -Don't be discouraged if a reviewer submits their comments with a ``-1``. -Patches iterate through several updates and reviews before they are ready for -merging. - -To reply to feedback, save all your comments as drafts, then click the -:guilabel:`Review` button. When replying to feedback, you as the patch -author can use the score of ``0``. The only exception to using the score of -``0`` is when you discover a blocking issue and you don't want your patch to -be merged, in which case you can review your own patch with a ``-2`` while -you decide whether to keep, refactor, or withdraw the patch. - -Professional conduct --------------------- - -The Zaqar team holds reviewers accountable for promoting a positive, -constructive culture within our program.
 - -If you ever feel that a reviewer is not acting professionally or is violating -the OpenStack community code of conduct, please let the PTL know immediately -so that he or she can help resolve the issue. - -.. _`making a good commit message`: https://wiki.openstack.org/wiki/GitCommitMessages -.. _`squashing guide` : http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html diff --git a/doc/source/contributor/first_review.rst b/doc/source/contributor/first_review.rst deleted file mode 100644 index 29158ff7..00000000 --- a/doc/source/contributor/first_review.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================= -Your first review -================= - -The review stage is a very important part of the development process. Here -are some of the reasons this stage is important: - -* Getting other developers' feedback minimizes the risk of adding - regressions to the code base and ensures the quality of the code being - merged. -* Reviewing code helps build the community, and everyone - appreciates having their code reviewed. -* Since developers are always learning from being exposed to the points of view - of others, reviews help developers to improve their coding skills. -* Providing a review is a great way to become familiar with the code. - -Everyone is encouraged to review code. You don't need to know every detail of -the code base. You only need to understand what the code related to the fix -does. - -Step by step ------------- - -Go to ``review.openstack.org`` and filter by `Open Zaqar fixes`_. Select a fix -from the list to review. Try to select an easy patch for your first review. -That will help you to gain some confidence. Download the patch to your local -repository and test it: - -.. code-block:: console - - $ git review -d [review-id] - -The :samp:`{review-id}` is the number in the URL (check the screenshot for more -details). - -Example: - -.. code-block:: console - - $ git review -d 92979 - -.. image:: images/zaqar_review_id.png - :alt: Zaqar review id - -This git command creates a branch with the author's name and enables you to -test the patch in your local environment. - -* Inspect the code. Use all of the best programming practices you know as you - review the code. -* Give code location feedback. - Do you think some code would be better placed somewhere else within the - file, or maybe in another file? If so, suggest this in the review comment - and score with a ``-1`` if you think it is important enough. -* Give code-style feedback. - Do you think that the code structure could be improved? Keep the DRY, - YAGNI and KISS principles in mind. -* Give grammar and orthography feedback. Many of our contributors are not - native English speakers, so it is common to find some errors of this type. -* Make sure that: - - * The commit message is formatted appropriately. - Check `Git Commit Messages`_ for more information on how you should - write a git commit message.
 - * The coding style matches the guidelines given in ``HACKING.rst``. - * The patch is not too big. - You might need to split some patches to improve cohesion and/or reduce - size. - * The patch does what the commit message promises. - * Unit and functional tests are included and/or updated. -* If during the inspection you see a specific line you would like to bring up - for discussion in the final review, leave feedback as an inline comment in - Gerrit. This will make the review process easier. You can also use the - prefixes described in :doc:`reviewer_guide` for Zaqar inline comments. -* Keep in mind the :doc:`reviewer_guide` and be respectful when leaving - feedback. -* Hit the :guilabel:`Review` button in the web UI to publish your comments - and assign a score. -* Things to consider when leaving a score: - - * You can score with a ``-1`` if you think that there are things to fix. We - have to be careful not to stall the cycle just because of a few nits, so - downvoting also depends on the current stage of the development cycle - and the severity of the flaw you see. - * You can score with a "0" if you are the author of the fix and you want to - respond to the reviewers' comments, or if you are a reviewer and you want - to leave a reminder for future development (e.g. the deadline is - the next day and the fix needs to be merged, but you want something to be - improved). - * You can score with ``+1`` if the fix works and you think that the code - looks good; upvoting is your choice. -* Remember to leave any comment that you think is important in the comment - form. When you are done, click :guilabel:`Publish Comments`. - -For more details on how to do a review, check out the `Gerrit Workflow -Review section`_ document. - -.. _`Open Zaqar fixes`: https://review.openstack.org/#/q/status:open+zaqar,n,z -.. _`Git Commit Messages`: https://wiki.openstack.org/wiki/GitCommitMessages -.. _`Gerrit Workflow Review section`: http://docs.openstack.org/infra/manual/developers.html#code-review - - diff --git a/doc/source/contributor/gerrit.rst b/doc/source/contributor/gerrit.rst deleted file mode 100644 index 99c0655d..00000000 --- a/doc/source/contributor/gerrit.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======================== -Code reviews with Gerrit -======================== - -Zaqar uses the `Gerrit`_ tool to review proposed code changes. The review site -is http://review.openstack.org. - -Gerrit is a complete replacement for GitHub pull requests. `All GitHub pull -requests to the Zaqar repository will be ignored`. - -See `Development Workflow with Gerrit`_ for more detailed documentation on how -to work with Gerrit. - -.. _Gerrit: https://www.gerritcodereview.com/ -..
_Development Workflow with Gerrit: http://docs.openstack.org/infra/manual/developers.html#development-workflow diff --git a/doc/source/contributor/images/zaqar_review_id.png b/doc/source/contributor/images/zaqar_review_id.png deleted file mode 100644 index 59b769fd3cbfec056c372221a797df1b91a230ce..0000000000000000000000000000000000000000 GIT binary patch [base85-encoded binary data for the deleted zaqar_review_id.png screenshot omitted]
zh4{+2%GmQML5L5HWZz#UNEBP3yqddHCAdl1RSU2_q)LhaOo>A2HwKHrPUGO>>T^0( zF4zP4kfOwcyx+l;6CQ5-0N1^b(?$xjro*K6;U;gSxS2AghD-;28xphTgNJvBV})R?H4D1uRN>Z zHoNJM?}|z1v7O)J(G$ZbJ|+G9dEVT(Bliz!l>Iv()?8Tz%-a95_2m3%%pvW1JM_@g-c>F1o7S z_KC4~X?9ji?*NK9Mm9E57#Y~$dd5vXOG*m#s2}-4A$mVM5-3|oLn{1efj8N44NR{m zWi2f7x8INJ*sTo{p{yp3ows*(BQ~KBiO)*IMDY+`_4N>mz%Y3A3i|e>H)5+r;&>H_ zMx|2>OkQCCbTs@fux4P9{eu5t9Yod1=(*(N>re$r`sR0Le8g~s36*n+`jreZ83);>L4<)G)Y&moHM2(v4*!^;| zu)vp;luWL;c;SLk=Vs28_OV1`l8F-q(qKyN+TeAozZ42FiN&-^Xt+MOz^7T+R zGxp?|hMr!G9o0Ee%IZUC5WpeZpZZC}s>VSV_aYV>Wi-3^_Q6<=sJQsu+4d7k2;r;( zQi|OVRXRtd4RE+9>plO-;Zonndhph*gFS=2y;>+JWCHp@Mk+5U!iuK4?Y@YUm<yMG@DRF8>ZOcZ} zts-r|psXywwKA<7EUqKTxZL+|DaWS$^&;xh?8smE#AV9lu!8q~Qz{1fG!HBokUv>S z#q}0q>!hyBt2c8xBCbN=Gc(nP=Xt-ya0p9;@~1t+PEMqkm>xAC7b@Lt&9c(k(u;?(NxaLc8I8>jnQzAYR5g~en5d`Xz46*NUcX3G!! z-#l@t_TRc!MFG;edoT0%bD-ba$ot6y83F$NM^0J!r^h3Mu75>NBoflmtti`;{BRq5 zNNih(^ZxhD7niGLb$Z5%$2GLJxpm^l=9kJQBgKHCqi|8@KIb)$E26(NFA`L@cN|Qq z4mj-hcyDLePR;I6~8BX}9-tH{d#JO}*LAW&c z?ng+H;nx!*!glKO--{$MyPldn)i|QkMzIn=<)|O_@ssQVli|s#(7IIKE_p}B1Kcc% znhXo`Hku=R2h7;1Xkp|!x4(G@Yc~B()DBQe&&1n)L4S;0uX;+%Id}(PK!B)*kT8c|r9L zxnL_Dt;dJ_rP4<=e_U}r20f`ynAuRMmp@6!Pkwb9$i}&CUA+5%e{o~Gmuh}>Cimm) z?6Q}pM4D-@A*#fgowl;+_m9AbS>b~&RoXXx$#%L+n0!F$i1a!PDnIF1oxQj@&XRIw zR86s|`Z6|uKpnHR!fLh6&>Fc=HUXQ3`dfU<2lb9a1X;tDjV*V`C%Q?x)Z5IWJ9RI88`*9O2ZNArudMQbrusePVGrQ*)^diU^Ul$YWVeqC!0 zr?-ceZ?5^Lde5Z`g;SDX0!(u|NN?e8wB(ZDS;ZNT1N#wm{gESEpHins_vy*n8% z&L0almD!iFEBHfdvsPaV(NH`CYPCfX52Z9OE0><}_tmR{vWuaX{Y4xd{qK6jascowzt`wed-bWM&F zyQoP-s0j3=MG#{9WF9{I^;3>d zO8qv@u(Y_y32&dA+UgYlml7u$_rcn#g6`?;{B-#H#?dKn3r!Yq?;S3Dl}&M(@U9sN#pwI^lG`H&}j7M zg(}UVx~d{>m*D6u#W+^U;IJ>Tmp^}+eb)S*td+fNxp}w&>(9eb9X$n_($GyEJ+t(p z(c+QM*Vf&z7W)tOOqw%pPYk%sCto%D*fY1CZPs&_yLRf>4gMoLl=s(c`h1l^4D35Pii$kW<8bWm_ka!ONTE1`eXZ?A^ zTxT9=0aDtJe?ABrfl-DcQA_Vd&2 z!Sfw5K5#&YyVNUXaLQLXh`j3$i>q zTWg*PE3Vy~^=D*~ZpG|n1rEJs?JMf{MaHbhbjCJtU^j*~avI$eW4|kX{^|1mlEji- z#-9f^cNB%9=M5UGUD_=*&wM&Xjl50%*)KL`UoJuvoU}X3^54S2Uhhw_g8eZe3(yDB zqmlK1P=c0Kbb^1jK(}@C9CA>6a&~Z+B{9-(j&ts#{uDJKQKx5+={X@fa6U!G6(Mzv z@>hI={?5$v$g>FbmPPGKz zq}5EsP9Nk_%`XsaD%VYDx*~-SJ+05wX4%8Le2oN4K0!kfiS_Eg)<<^h#Sg+f$C_R% zYS(!lHboUU>E~~%9tWv|#UCowqv;khBqM`m2@qH0Mxm0L zvg#mdqkO2MPqnycG}A?xmN#1SW2v|^!QkGd+@nuok?DWUtCjOHzlO+-)^(@7<*)x1 zQ9fGgnq(I(N1GBoYi(=vnYJ6~yQ^FtSH>Ihg5vc?P@jI!@LEDvl1~l}b5gXtCarEF z_V_`HyhyR_7|9n{5Q*G32Ie^?C622OC3-lrLx!82IkI0}n~Cn_Xm}I6^%k9CP=){C zA~kjy+F1_f*O!}^DQuP&dcNj3%v07{g1d1QzMZ!H>n6q?_~Acuz+ z{xl;}R#iP}vbg@S%(P%#1RQs>%hzV?O=i5ytuKQ|dChLpf+EC|}Ss<=6S18;0>Jb5}7}|TdV(nVOpZ8K3A?&;grDd3T z;ZZH5bmBsoGZy1dBi62Sy1yshAI)-Cl4jNChtS%cokygSPIOBSw0>D@m*B7>I_(yk z?S$%{f#Hy#*_ZG~-|b|>xtg|gUc&&E`>{>omQvcYS8m6$BO8gu6Y0-payYSL!e{sg zoBL1jjfJp~JCGRGQ~>fHS?8`9PaXx;jb`5eKbz$7)4Ze?vP=1_lj>3+K_aSDt@Z!D zM;^YQP7vGRSWUxX@7}zidSjK?N$?V^_*%#YU{}@ra76nZFxpiXhb(xraVJTYg2xMU ziAVfn1ol(cUH**XQF_0tu#+_2e8`L-h@;s$!kY16^vXKJUjLaX5TX*X}DjvQ{8BZ1Qs_Q!hMDJMkL4_y^}MRmmV9EQIvdA_}pdSed9 zZ&(Z2N$LHQYc79Yi>^(LjO$DXIngJI5}#o?T+j`95FUpyoV2WB6-`ewd}v`{n!`V zPh9BM{NYZxjk(aA-wARvGXbY7m{OW--6X-8E-w4VtCx%^uoM*z zU6yyhCxr!%Q4!lfOSEuj{CV=rpPdbz_|-I*>%MBOh$~k8op7}D>~1O3y~V&{m&;?B z3fOsEn0Ywg*N2DhC!M^l#D-^mxQN&DJ;M+G&gX=*NX&MoF7ddj5yBXEf;kt>*BMn5 z5h89umHFiy?e0XwztpX~;TflQ-P{TfcHR5Hu~6xO1Sx6(SCW2<(c6j#WS6mG0mQL? 
zkj#G(nx+r96r!ulTCjfF+Hjhz?in48+WCt8*fdMLf}W-+Vz(>s)NrgmHGL=WcU6FD z?Y(h}QEzTvB!>G08@>vqL_>9S_tt*PVC3+um*SUfRVcM|QF6$?A1#?`uU^Kh_;EGT zi3#>6M8A)vqhz}O#Eiw=nW%?09HQ--_21u!Pg{(l4GpebLa|nE+-cOL#zR#|BP*5M zEd5Bv(pc@Ht>}?%OBY?3woLrhYtw?i>k0TmBgiF<4s-Ef&FaIE`*tM$WZAl8CMDV9 zYOm|E2DP9q2`?`%$33p6{`{W}iH&KjaI^WyDV!I0!uFX3Z76@t$iqTw&=LMH`x}3L zEaSm!?$9lCtPiA?Pmc&*)z$Wi>;G9bAL+E8HY+@UIIX+zF>Ne#WBH97yriM)7G0*B zxT0!!PiW$Qizy486;B5b(`#>{yx_T{-k;gL1+gXb%gZNN8*X|o4)|^FGHo5wpSX^+ zPr*vji9KTAfvHha54nC{fQ2Gn_605Rtns@mH(Y!mOBTx)G$c0t-v|}5rSeDjfpX!p zwQj5c*-FDCriZ$CQO8v`MSA4q+(SYZw! zCLbRk%%`esGY3O-0-$657_aqd<xS!;h^A=dzp^X7HvN$S8rtPjl65gGWyiEO2mN?MfEMV2u|mWs&46iK17uY)w%cg>Jxc)zFK``-7x zuRo5DGjopfJm-0S&+nYycd^Li1MF&ll$QNR2|rE^h2%V=?!D1_z%q4&gayKPh9n6= zSf0|9k0wGYE(4OFR5oZAHc$+CXiU`)b>#`ds~PB0B!R@^bUry{L3@QaIRN78Y6KJ# z6qS{Aj~z>5p9GGgTW#-ApX;dPk;NyRd@Z#ug(3np6ktxe9v+G$zJ=~Rc;~MD5tw%X zKU7=07+pf5Jn4&e^YNfQ58&%Okm26|usk$9IS5#gxN%0@qYZx-=<9Q;xt3P`KQi@X z_UjP1QNq-;M8bt6CIVZ90-Xd90#r}D!K2c2yL)^0G~F9Pr48h>*(f;wyBxhtD+3G& zafoyjplzQ!J1HUwgXZgW&(2L!N0knBDuRG}uD(kvzzINLzr|3* zz}VN9bi%+@LqJ+TL4DGYyp1N^=fZF$q_i*iTwV1~SOVS-aGw6&c2O##5)i~I5Pnw= zm=Xg_Aecs@nfUX&o|(*i5xV(PH$g#@IJi@;9=R@Gu zFIq{-0w@NW=^Ggsh~nraB$K1Ca62={eS6AFpqMLwCOPcvydKd0m6=Q3pu+{M&aVLK z6^$rmar~Nfvun7PEkFC-H+rhG_1RBQ)$RSFhKLpT`A328ulnELMCJ0!7-%q8G)ZDy zeQx3ih&hlGw<|{nTSah>!IuV~S5Geil&1Dt6{7F&DQ#Vn^P?Rt?`&ohq+`tmo#EK5 zK^7|*WTCax*<|8o;-uYMwEQn4sr)F zK4%G#801T!4v7jt3@)!U3j}yS5_4~D9cwTBvpKSZHki2?^~OmN8g@$vLL_4Mj%_CP z|HSD3|0E7T#QIfK^a3jPgf=vT_yq(i0QTWqIUl@s0zwtrz+6PVori!VZ&zKq6p&>+ za6(U{(e`+i{AZ#UH6F~c;eqb++I|YVHNLuL@axy}jG1W&eRC(g-W^pW{RjzoRsRCm zeoVuUku)3}hA2<~QXJqa2n|Q0ddk6ogA?RN*G8q=<0m`HUQ?|7FQ7sVYd&6}d~4Pn ze|vQ)iIPGcgDS#G#);W|4GS@}c!{auVkr`nNsyW&l{I1T>`1^dIOUDTf!b7YN%hmf z#?G|?A3>k9OiXMMUGXWrWk9Odr+CEppQR@&cu**TKs-@>F)7Knuz#w$&-zIZ?Q6JY zuR?Y;ckGRTBBBv7hXBjmqGv>xz7a@BAe};OyCDwno0b+J`EMZ0d?6tr!OVxG20%Ui zCn9`}lF-_HYM-w|iWdn=svp3C;P@8-E<{xXifAFa9N)uDJRcn$ zU73Rk8u2`EYFMwWy+ciMn)oFp^cJoQ z84(>mzLgQ@1Pn-`Gj!_-0yPjvmxFqGyjZ1959W=1?&-M<_}ZTC;|#7!h=|lRRCM;7 zol8_dHeZO6I6(r3;CgC%)@0wo=Ps{-IJ>+d+gR@qmn3ij%njdHAuJ5=e0<&9Oj7ql z==80#h2m$94^9WiKVG77EpW$yIqcOPPewY+D})85g;b+i%QECcgq1fvJU|JL-a2tH z@U(%^g3ZbX!Yt|AK3lC8hKs7{tYvsOtS1f8$xgAVJKI)^Qc-?n^YeQ zw^dl*C(jdKIjQ_?m3nLE16Nm9CuiqdAC8F+1zGvDi}u$) zDP+}G8EK_Lmn7xcv;APQdLAzyrGRjmgy8fLyC4P>FllIHL_uIhkxt{9*0-|F>(;LA zc3+YV_XyENi@E^iShryVhfeuTR(2O6NY$hLG!tv;LBDH#t2#W!+o3?aTIilY9)gzAS;z+qBtCmr34>A1hqY}pN*>`d>6i2K>l9g z^3N@C_{SIid&>vqQi2%ZIV;CQFnzqdyyObzV?^5jN_@5w7Kh&3<8=hP0*J})x~Ji` zHB$O`6T$2~%&L`&A%a?(C%#;Dyer2~1ZD6LVtygJ>*2zuLJ-+9E@E=Ce(cQ=I$Wsx z*X~s=cjXuJ5Zd)!DSdq|R2t10E3L-0^&9XD&V%)mSbgN=BZ}w z9UVPqc_q8*Uixu((HVlfTwPOP4g$dvC6~pn56@NZWwTDGW(Lj#fCw+EsAzoC6C!J( zix)e=;{{aIWuF#&mY*k%@ZdW=-if#;PgRdnd@$;F`_K!&A}$AE;x>I1#azM85iyo~ zukLm!C1-~q!6%-r=jZp*$;s(xc9VHOeedqwp?c-l6ZNG0O_CgYFji|YG0mlIF69Ug zM()eX%zPLZw+&qn_QRcz(ONW6YewmL`FAdaTmU4_^fIJ`%z4qI*x(p z<-_H<7a2*z;_HY3>*U~YA`*5fDf#(U$Te!gV6LgJe~7QTHayh@(Ji8|?L=%-L&Kvl zUu^S+--^TZ_hwZ$TP+FF70BR?FiF7rj&)<^v{iHh;hKrS8SrVm?rKx()THf-Z<_=Q zS2=h_{eChD%$4@6<$?Y!xsuHcP4_S7p~a91Av~~H!|Yd<_m6k=14@^!du8!^+b0gg zUy4dTeHxQJ?&@l4VR0|XFgh8leK>ltd7>+|OvxZj;`d4}x4kqn%WvERtuY{{AO5rx z3jx8VhRKvG+*r+IJ_ZhQX-FIlPZ?g&D`w+yq;#WTG!~k+wzdg8QQ-T)^CslvsK#>W z6Ak(PbgZd)AMsdQR`3=+KhPCaC!x1_cYvIK@00~F*Pc`KykZ}%si9ynnp0>ozWT6O zu7uNk7Mx}=C@=uu_*q{YW7!;7Z`V@=H_d0!jcmGhW=4WSKpNT5fO=(FY|jh z1F(!nl(u_3dil~4H<@G3NRhfi+UES0esVS;_$*`P`@D+peGShvO0Wo|!r1zkxpUX4 zsF*=0Z%o!mU;k#2?Y^T&6M-asP%IV5{EL^d}!onom@l3)mB*d`@1Y$8yb7Hew;J3jmXFnEGu8-7SV`%zAb-@_i3-CE= 
zs;l2ToB#bnwzqgmX{j>YhoQjpZlfT!4kZz5*C{A?$9}7!Sa&zr$C~00+`!(7nDnPD zR@~T)htIA4jX9m@d=O)99iC((SI0!(9*suz2A?6_(P(C^Xr(ug$IWAU7=ls|Y-eR9hN| zAo`o@%@ZmzGP-0W!dHujF`=;V{INvdvOHXCx<0|`z=8Ttk7Pf0S}t0!pwf8~-sO!s z?g^8Vm}S1pOA$syB_!l_^YiofxJO2$Y4XOWvqrbf20@Va!ZKf^wM_9XLZ5j4XBL%C{XU zp7>Lou(=x&_T)%0UPtI4tVtHb149}AMPs!7$kV3&#(e)Xj7AY5p&D=onGEKnz$|2g zU=BoJ*w?MtxM=Q?>Df_6+S)-9!g}HU;=&gTJ1s`DKD89QK?bCZ=-!O(D|S^m+p0)W zh+WZASs8!aFuF3TWAi|e!fulBQ?PKJTAha#F18wWn;ce=0Ejj><)2S~#TZdcXNj$( zYH}l{eY$T>=T7VT554A{nGx_mo#Qk^t7XXt%UA8)o1qI%kFRUtT<*#dk*8PrPmNV^ zXxX_d564Y#*{$o3*U9VZ={cAA&m?tqIk?-M=oLAZ})g%LZQ&0u94pP@$RPW zkW9_>`IclDtC`weMQeS+2zq^YM!VzIjT>=zd_3b~F&MCwIn8bmwIQLhu%DLna&~h1 z>ZxXGZQa`2;rTf`IeSmw=s6xmi-PqP#nL1Gdt+zUF?$k^AEgoN#b zZfEKeG_38WRW8l=jK|irw4{B~oGkN~`~7!wOxd2UuDxL7fVjuBd;UTNsZx%@ncOS` zYcB}D+`fN*eXm4{U|~7lU_@Xof=s4n&6u)+e8Q}ttw(lgZDZO#dFPac_=poi&q^wA z>Iv@gV>4$)6Vl;Yq&4w_`IClt$HRbVL|%+D0+*Rt65^CWw3;bYC8kXMedv2k0yUQ$ z+1R0VeSF8xolbb>mXzvQGJk$K>r@US=~J4)5JvBNhUJ%vL#I#YJa=h`$0K`epwX-2 zVcjEsx-IP<-Jd&ovu0RajS^|c28Kdae9;wlL16xaGES7mw}NFdtEGDoOH9#H`nB|@ z`BztUawJj@ywC|Xl3ph*y+gq+KN0wHZo;S(P`Tw>XE@jBADM|SZYCz4mkvLbn?Gv0 zWjJ()Yk>2!j7U%%RL?_r*YZ_KFvsp+xpF1z==%j`i>-ni z^Cr57U)l6d&)6M06pc47#qrFY$lPPFo*v*M#B_e-WXu167c>zPMcg41HDqvO>hf{3 z{>tuN1u^ev{7BQ}@S%3Sm?blmpP1;x>WCDMC@guwex`u` sz12p(MxO2OEm45VGx0xL!T6ckwQuL~w_c5^CO_M)Yq%p#$L8F>0Fw=KX8-^I diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index d06ec189..00000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,42 +0,0 @@ -================== -Contribution Guide -================== - -.. toctree:: - :maxdepth: 2 - - welcome - development.environment - first_patch - first_review - launchpad - gerrit - jenkins - reviewer_guide - running_tests - test_suite - -Modules reference -~~~~~~~~~~~~~~~~~ - -Zaqar is composed of two layers: - -.. toctree:: - :maxdepth: 1 - - transport - storage - -The **transport drivers** are responsible for interacting with Zaqar clients. -Every query made by clients is processed by the transport layer, which is in -charge of passing this information to the backend and then returning the -response in a format understandable by the client. - -The **storage drivers** are responsible for interacting with the storage -backends and, that way, store or retrieve the data coming from the transport -layer. - -In order to keep these layers decoupled, we have established that -**checks should be performed in the appropriate layer**. In other words, -transport drivers must guarantee that the incoming data is well-formed and -storage drivers must enforce their data model stays consistent. \ No newline at end of file diff --git a/doc/source/contributor/jenkins.rst b/doc/source/contributor/jenkins.rst deleted file mode 100644 index ced58f8d..00000000 --- a/doc/source/contributor/jenkins.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -=================================== -Continuous integration with Jenkins -=================================== - -Zaqar uses a `Jenkins`_ server to automate development tasks. The Jenkins -front-end is at http://jenkins.openstack.org. You must have an account on -`Launchpad`_ to be able to access the OpenStack Jenkins site. - -Jenkins performs tasks such as running static code analysis, running unit -tests, and running functional tests. For more details on the jobs being run by -Jenkins, see the code reviews on http://review.openstack.org. Tests are run -automatically and comments are put on the reviews automatically with the -results. - -You can also get a view of the jobs that are currently running from the Zuul -status dashboard, http://status.openstack.org/zuul/. - -.. _Jenkins: http://jenkins-ci.org -.. _Launchpad: http://launchpad.net diff --git a/doc/source/contributor/launchpad.rst b/doc/source/contributor/launchpad.rst deleted file mode 100644 index e5f4a6cf..00000000 --- a/doc/source/contributor/launchpad.rst +++ /dev/null @@ -1,56 +0,0 @@ -============================== -Project hosting with Launchpad -============================== - -`Launchpad`_ hosts the Zaqar project. The Zaqar project homepage on Launchpad is -http://launchpad.net/zaqar. - -Launchpad credentials ---------------------- - -Creating a login on Launchpad is important even if you don't use the Launchpad -site itself, since Launchpad credentials are used for logging in on several -OpenStack-related sites. These sites include: - - * `Wiki`_ - * Gerrit (see :doc:`gerrit`) - * Jenkins (see :doc:`jenkins`) - -Mailing list ------------- - -The developers mailing list address is ``openstack-dev@lists.openstack.org``. -This is a common mailing list across all OpenStack projects. -To participate in the mailing list: - - Subscribe at http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev - -The mailing list archives are at http://lists.openstack.org/pipermail/openstack-dev. - -Bug tracking ------------- - -Report Zaqar bugs at https://bugs.launchpad.net/zaqar - -Feature requests (Blueprints) ------------------------------ - -Zaqar uses Launchpad Blueprints to track feature requests. Blueprints are at -https://blueprints.launchpad.net/zaqar. - -Technical support (Answers) ---------------------------- - -Zaqar uses Launchpad Answers to track Zaqar technical support questions. The -Zaqar Answers page is at https://answers.launchpad.net/zaqar. - -Note that `Ask OpenStack`_ (which is not hosted on Launchpad) can also be used -for technical support requests. - -You can also reach us in ``#openstack-zaqar`` IRC channel at -``irc.freenode.org``. - -.. _Launchpad: http://launchpad.net -.. _Wiki: http://wiki.openstack.org -.. _Zaqar Team: https://launchpad.net/zaqar -.. _Ask OpenStack: http://ask.openstack.org/ diff --git a/doc/source/contributor/reviewer_guide.rst b/doc/source/contributor/reviewer_guide.rst deleted file mode 100644 index 90517c37..00000000 --- a/doc/source/contributor/reviewer_guide.rst +++ /dev/null @@ -1,165 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - License for the specific language governing permissions and limitations - under the License. - -============== -Reviewer Guide -============== - -Overview --------- - -Our program follows the usual OpenStack review process, albeit with some -important additions (see below). See also: :doc:`first_review`. - -Be Professional ---------------- -The PTL, with the support of the core reviewers, is ultimately responsible for -holding contributors accountable for creating a positive, constructive, and -productive culture. Inappropriate behavior will not be tolerated. -(`Why this is important?`_) - -Do This: - -* Act professionally. -* Treat others as friends and family. -* Seek first to understand. -* Be honest, transparent, and constructive. -* Use clear, concise language. -* Use prefixes to clarify the tone and intent of your comments. - -Don't Do This: - -* Use indecent, profane, or degrading language of any kind. -* Hold a patch hostage for an ulterior motive, political or otherwise. -* Abuse the review system to discuss big issues that would be better hashed out - on the mailing list, in IRC, or during OpenStack Summit design sessions. -* Engage in bullying behaviors, including but not limited to: - - * Belittling others' opinions - * Persistent teasing or sarcasm - * Insulting, threatening, or yelling at someone - * Accusing someone of being incompetent - * Setting someone up to fail - * Humiliating someone - * Isolating someone from others - * Withholding information to gain an advantage - * Falsely accusing someone of errors - * Sabotaging someone's work - -Reviewing Docs --------------- - -When possible, enlist the help of a professional technical writer to help -review each doc patch. All reviewers should familiarize themselves with -`OpenStack Documentation Contributor Guide`_. When reviewing user guide -patches, please run them through Maven and proof the resulting docs before -giving your ``+1`` or ``+2``. - -Reviewing Code --------------- - -When reviewing code patches, use your best judgment and seek to provide -constructive feedback to the author. Compliment them on things they have done -well, and highlight possible improvements. Also, dedicate as much time as -necessary in order to provide a careful analysis of the code. Don't assume that -someone else will catch any issues you yourself miss; in other words, pretend -you are the only person reviewing a given patch. Remember, "given enough -eyeballs, all bugs are shallow" ceases to be true the moment individual -reviewers become complacent. - -Some things to check when reviewing code: - -* Patch aligns with project goals, and is ideally associated with a bp or bug. -* Commit message is formatted appropriately and contains external references as - needed. -* Coding style matches guidelines given in ``HACKING.rst``. -* Patch is cohesive and not too big to be reviewed in a timely manner (some - patches may need to be split to improve cohesion and/or reduce size). -* Patch does what the commit message promises. -* Algorithms are implemented correctly, and chosen appropriately. -* Data schemas follow best practices. -* Unit and functional tests have been included and/or updated. -* Code contains no bugs (pay special attention to edge cases that tests may - have missed). - -Use Prefixes ------------- - -We encourage the use of prefixes to clarify the tone and intent of your review -comments. This is one way we try to mitigate misunderstandings that can lead to -bad designs, bad code, and bad blood. - -.. 
list-table:: **Prefixes**
-   :widths: 6 80 8
-   :header-rows: 1
-
-   * - Prefix
-     - What the reviewer is saying
-     - Blocker?
-   * - KUDO
-     - You did a nice job here, and I wanted to point that out. Keep up the
-       good work!
-     - No
-   * - TEST
-     - I think you are missing a test for this feature, code branch, specific
-       data input, etc.
-     - Yes
-   * - BUG
-     - I don't think this code does what it was intended to do, or I think
-       there is a general design flaw here that we need to discuss.
-     - Yes
-   * - SEC
-     - This is a serious security vulnerability and we better address it before
-       merging the code.
-     - Yes
-   * - PERF
-     - I have a concern that this won't be fast enough or won't scale. Let's
-       discuss the issue and benchmark alternatives.
-     - Yes
-   * - DSQ
-     - I think there is something critical here that we need to discuss in
-       IRC or on the mailing list before moving forward.
-     - Yes
-   * - STYLE
-     - This doesn't seem to be consistent with other code and with
-       ``HACKING.rst``.
-     - Yes
-   * - Q
-     - I don't understand something. Can you clarify?
-     - Yes
-   * - DRY
-     - This could be modified to reduce duplication of code, data, etc.
-       See also: `Wikipedia: Don't repeat yourself`_
-     - Maybe
-   * - YAGNI
-     - This feature or flexibility probably isn't needed, or isn't worth the
-       added complexity; if it is, we can always add the feature later. See
-       also: `Wikipedia: You aren't gonna need it`_
-     - Maybe
-   * - NIT
-     - This is a nitpick that I can live with if we want to merge without
-       addressing it.
-     - No
-   * - IMO
-     - I'm chiming in with my opinion in response to someone else's comment,
-       or I just wanted to share an observation. Please take what I say with
-       a grain of salt.
-     - No
-   * - FYI
-     - I just wanted to share some useful information.
-     - No
-
-.. _`Why this is important?` : https://thoughtstreams.io/kgriffs/technical-communities/5060/
-.. _`OpenStack Documentation Contributor Guide` : http://docs.openstack.org/contributor-guide/index.html
-.. _`Wikipedia: Don't repeat yourself` : https://en.wikipedia.org/wiki/Don't_repeat_yourself
-.. _`Wikipedia: You aren't gonna need it` : https://en.wikipedia.org/wiki/You_aren%27t_gonna_need_it
\ No newline at end of file
diff --git a/doc/source/contributor/running_tests.rst b/doc/source/contributor/running_tests.rst
deleted file mode 100644
index 943b04e7..00000000
--- a/doc/source/contributor/running_tests.rst
+++ /dev/null
@@ -1,167 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-=============
-Running tests
-=============
-
-Zaqar contains a suite of tests (both unit and functional) in the
-``zaqar/tests`` directory.
-
-   See :doc:`test_suite` for details.
-
-Any proposed code change is automatically rejected by the OpenStack Jenkins
-server [#f1]_ if the change causes test failures.
-
-It is recommended that developers run the test suite before submitting a
-patch for review. This allows them to catch errors as early as possible.
-
-Preferred way to run the tests
-------------------------------
-
-The preferred way to run the unit tests is using ``tox``. It executes the
-tests in an isolated environment by creating a separate virtualenv and
-installing dependencies from the ``requirements.txt`` and
-``test-requirements.txt`` files, so the only package you have to install
-yourself is ``tox``:
-
-.. code-block:: console
-
-   $ pip install tox
-
-See `the unit testing section of the Testing wiki page`_ for more information.
-Following are some simple examples.
-
-To run the Python 2.7 tests:
-
-.. code-block:: console
-
-   $ tox -e py27
-
-To run the style tests:
-
-.. code-block:: console
-
-   $ tox -e pep8
-
-To run multiple environments at once, separate the items with commas:
-
-.. code-block:: console
-
-   $ tox -e py27,py35,pep8
-
-.. _the unit testing section of the Testing wiki page: https://wiki.openstack.org/wiki/Testing#Unit_Tests
-
-Running a subset of tests
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Instead of running all tests, you can specify an individual directory, file,
-class or method that contains test code, i.e. filter the full names of tests
-by a string.
-
-To run only the tests located in the ``zaqar/tests/unit/storage``
-directory, use:
-
-.. code-block:: console
-
-   $ tox -e py27 zaqar.tests.unit.storage
-
-To run the tests specific to the MongoDB driver in the
-``zaqar/tests/unit/storage/test_impl_mongodb.py`` file:
-
-.. code-block:: console
-
-   $ tox -e py27 test_impl_mongodb
-
-To run the tests in the ``MongodbMessageTests`` class in
-the ``tests/unit/storage/test_impl_mongodb.py`` file:
-
-.. code-block:: console
-
-   $ tox -e py27 test_impl_mongodb.MongodbMessageTests
-
-To run the ``MongodbMessageTests.test_message_lifecycle`` test method in
-the ``tests/unit/storage/test_impl_mongodb.py`` file:
-
-.. code-block:: console
-
-   $ tox -e py27 test_impl_mongodb.MongodbMessageTests.test_message_lifecycle
-
-Running functional tests
-------------------------
-
-Zaqar's functional tests treat Zaqar as a black box. In other words, the API
-calls attempt to simulate an actual user. Unlike unit tests, the functional
-tests do not use mock endpoints.
-
-Functional test modes
-^^^^^^^^^^^^^^^^^^^^^
-
-Functional tests can run in integration mode and non-integration mode.
-
-Integration mode
-""""""""""""""""
-
-In integration mode, functional tests are performed on Zaqar server instances
-running as separate processes. This is real functional testing.
-
-To run functional tests in integration mode, execute:
-
-.. code-block:: console
-
-   $ tox -e integration
-
-Non-integration mode
-""""""""""""""""""""
-
-In non-integration mode, functional tests are performed on Zaqar server
-instances running as Python objects. This mode doesn't treat Zaqar as a true
-black box, but the tests run about 10 times faster than in integration mode.
-
-To run functional tests in non-integration mode, execute:
-
-.. code-block:: console
-
-   $ tox -e py27 zaqar.tests.functional
-
-Using a custom MongoDB instance
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If you need to run functional tests against a non-default MongoDB
-installation, you can set the ``ZAQAR_TEST_MONGODB_URL`` environment variable.
-For example:
-
-.. code-block:: console
-
-   $ export ZAQAR_TEST_MONGODB_URL=mongodb://remote-server:27017
-
-Using custom parameters
-^^^^^^^^^^^^^^^^^^^^^^^
-
-You can edit the default functional test configuration file
-``zaqar/tests/etc/functional-tests.conf`` according to your needs.
-
-For example, to run the functional tests with Keystone authentication
-enabled, add a valid set of credentials to the ``[auth]`` section of the
-configuration file and set the ``auth_on`` parameter to ``True``.
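-
-For illustration, here is a minimal sketch of what that ``[auth]`` section
-might look like. The ``auth_on`` option comes from the paragraph above; the
-remaining option names are assumptions, so check the sample file
-``zaqar/tests/etc/functional-tests.conf`` for the authoritative names:
-
-.. code-block:: ini
-
-   [auth]
-   # auth_on is referenced in the text above; the credential option
-   # names below are illustrative placeholders only.
-   auth_on = True
-   username = ZAQAR_TEST_USER
-   password = ZAQAR_TEST_PASSWORD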
- -Using local Mysql database -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To use a similar testing environment with database support like upstream CI, -you can run ``zaqar/tools/test-setup.sh`` to create a required Mysql user -``openstack_citest`` with same password. The user is required by oslo.db's -test. Zaqar needs it because Zaqar's sqlalchemy database migration is -leveraging oslo.db's migration test base. - -.. rubric:: Footnotes - -.. [#f1] See https://docs.openstack.org/infra/system-config/jjb.html diff --git a/doc/source/contributor/storage.rst b/doc/source/contributor/storage.rst deleted file mode 100644 index f5f592b8..00000000 --- a/doc/source/contributor/storage.rst +++ /dev/null @@ -1,32 +0,0 @@ ---------------------------------- -API reference for storage drivers ---------------------------------- - -.. currentmodule:: zaqar.storage.base - -.. autoclass:: DataDriverBase - :noindex: - :members: - -.. autoclass:: ControlDriverBase - :noindex: - :members: - -.. autoclass:: Queue - :noindex: - :members: - -.. autoclass:: Message - :noindex: - :members: - -.. autoclass:: Claim - :noindex: - :members: - - --------------- -MongoDB Driver --------------- - -.. automodule:: zaqar.storage.mongodb diff --git a/doc/source/contributor/test_suite.rst b/doc/source/contributor/test_suite.rst deleted file mode 100644 index a5b4e373..00000000 --- a/doc/source/contributor/test_suite.rst +++ /dev/null @@ -1,96 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================== -Test suite structure -==================== - -Test types ----------- - -There are three types of tests for Zaqar: - -Unit tests - Unit tests check modules separately. For example, there - are checks for each individual method that the storage layer provides. - -Functional tests - Functional tests verify that the service works as expected. In particular, - in Zaqar they exercise the API endpoints and validate that the API - responses conform to the specs. These include positive and negative tests. - -Tempest tests - Tempest tests are integration tests for OpenStack [#f1]_. - - Tempest tests for Zaqar are available in the `Tempest repository`_. - -Refer to :doc:`running_tests` document for details on how to run Unit and -Functional tests. - -Refer to the `Tempest repository`_ for details on how to run Tempest tests. - -Code structure --------------- - -The test suite lives in ``zaqar/tests`` directory of Zaqar: - -* ``zaqar/tests/etc`` - Contains various configuration files for Zaqar. They help to test how Zaqar - works in different configurations. - -* ``zaqar/tests/functional`` - Contains functional tests. - -* ``zaqar/tests/unit`` - Contains unit tests. - -The base class of all test classes is located in the ``zaqar/tests/base.py`` -file. - -Test invocation ---------------- - -When you run tests via ``tox -e py27`` command in the root directory of Zaqar: - -#. Tox program executes: - - #. Looks for ``tox.ini`` file. - #. Creates ``.tox`` directory for storing python environments. - #. 
Parses this file and finds parameters for py27 testing environment. - #. Sets this environment up and activates it. - #. Sets environment variables for this environment that are described in - ``tox.ini`` - #. In case of Zaqar it invokes Testr program in the environment. - - You can find more information about Tox in `OpenStack Tox testing manual`_ - and in official `Tox documentation`_. - -#. Testr (Test Repository) program executes: - - #. Looks for ``testr.ini`` file. - #. Parses this file and finds parameters for executing tests. - #. Creates ``.testrepository`` directory for storing statistics of - executing tests. - #. In case of Zaqar it invokes ``Subunit`` program which finds all tests and - executes it. - - You can find more information about Testr in `OpenStack Testr manual`_. - -.. rubric:: Footnotes - -.. [#f1] See http://docs.openstack.org/developer/tempest/overview.html - -.. _`OpenStack Tox testing manual` : https://wiki.openstack.org/wiki/Testing#Unit_Testing_with_Tox -.. _`Tox documentation` : https://tox.readthedocs.org/en/latest/ -.. _`OpenStack Testr manual` : https://wiki.openstack.org/wiki/Testr -.. _`Tempest repository` : https://git.openstack.org/cgit/openstack/tempest diff --git a/doc/source/contributor/transport.rst b/doc/source/contributor/transport.rst deleted file mode 100644 index 72759449..00000000 --- a/doc/source/contributor/transport.rst +++ /dev/null @@ -1,9 +0,0 @@ -========= -Transport -========= - -.. currentmodule:: zaqar.transport.base - -.. autoclass:: DriverBase - :noindex: - :members: diff --git a/doc/source/contributor/welcome.rst b/doc/source/contributor/welcome.rst deleted file mode 100644 index f7c81e25..00000000 --- a/doc/source/contributor/welcome.rst +++ /dev/null @@ -1,187 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======================== -Welcome new contributors -======================== - -First Steps -=========== - -It's very great that you're interested in contributing to Zaqar. - -First of all, make sure you join Zaqar communication forums: - -* Subscribe to Zaqar `mailing lists`_. -* Join Zaqar team on IRC. You can chat with us directly in the - ``#openstack-zaqar`` channel on ``irc.freenode.org``. If you don't know - how to use IRC, you can find some directions in `OpenStack IRC wiki`_. -* Answer and ask questions on `Ask OpenStack`_. - -How can I contribute? -===================== - -There are many ways you can contribute to Zaqar. Of course coding is one, but -you can also join Zaqar as a tester, documenter, designer or translator. - -Coding ------- - -Bug fixing -^^^^^^^^^^ - -The first area where you can help is bug fixing. ``Confirmed`` bugs are usually -your best choice. ``Triaged`` bugs should even contain tips on how they -should be fixed. You can find both of them in -`Zaqar's Confirmed and Triaged bugs`_ web page. - -Once you selected the bug you want to work on, go ahead and assign it to -yourself, branch the code, implement the fix, and propose your change for -review. 
You can find information on how to do it in the
-:doc:`first_patch` manual.
-
-Some easy-to-fix bugs may be marked with the ``low-hanging-fruit`` tag: those
-are good targets for a beginner.
-
-Bug triaging
-^^^^^^^^^^^^
-
-You can also help Zaqar with bug triaging. Reported bugs need care:
-prioritizing them correctly, confirming them, making sure they don't go stale.
-All those tasks help immensely. If you want to start contributing but are not
-a hardcore developer yet, consider helping in this area.
-
-Bugs can be marked with different tags according to their status:
-
-* ``New`` bugs are those bugs that have been reported by a user but haven't
-  been verified by the community yet.
-* ``Confirmed`` bugs are those bugs that have been reproduced by someone
-  other than the reporter.
-* ``Triaged`` bugs are those bugs that have been reproduced by a core
-  developer.
-* ``Incomplete`` bugs are those bugs that don't have enough information to be
-  reproduced.
-* ``In Progress`` bugs are those bugs that are being fixed by some developer.
-  This status is set automatically by the Gerrit review system once a fix is
-  proposed by a developer. You don't need to set it manually.
-* ``Invalid`` bugs are those bugs that don't qualify as bugs; usually a
-  support request or something unrelated to the project.
-
-You can learn more about this in Launchpad's `Of Bugs and Statuses`_.
-
-You only have to worry about ``New`` bugs. If you can reproduce them, you can
-mark them as ``Confirmed``. If you cannot reproduce them, you can ask the
-reporter to provide more information and mark them as ``Incomplete``. If you
-consider that they aren't bugs, mark them as ``Invalid`` (be careful with
-this; asking someone else in Zaqar first is always a good idea).
-
-Also, you can contribute instructions on how to fix a given bug.
-
-Check out the `Bug Triage`_ wiki for more information.
-
-Reviewing
-^^^^^^^^^
-
-Every patch submitted to OpenStack gets reviewed before it can be approved and
-merged. Zaqar gets a lot of contributions and everyone can (and is encouraged
-to) review Zaqar's existing patches. Pick an open review and go through
-it, test it if possible, and leave a comment with a ``+1`` or ``-1`` vote
-describing what you discovered. If you're planning on submitting patches of
-your own, it's a great way to learn about what the community cares about and to
-learn about the code base.
-
-Make sure you read the :doc:`first_review` manual.
-
-Feature development
-^^^^^^^^^^^^^^^^^^^
-
-Once you get familiar with the code, you can start to contribute new features.
-New features get implemented every 6 months in the
-`OpenStack development cycle`_. We use `Launchpad Blueprints`_ to track the
-design and implementation of significant features, and the Zaqar team uses
-Design Summits every 6 months to get together and discuss things in person
-with the rest of the community. Code should be proposed for inclusion before
-Zaqar reaches the final feature milestone of the development cycle.
-
-Testing
--------
-
-Testing efforts are highly related to coding. If you find that there are test
-cases missing or that some tests could be improved, you are encouraged to
-report it as a bug and then provide your fix.
-
-See :doc:`running_tests` and :doc:`test_suite` for information on how to run
-tests and how the tests are organized in Zaqar.
-
-See :doc:`first_patch` for information on how to provide your fix.
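-
-If you spot a missing test case, a new unit test is often the easiest first
-patch. Below is a minimal sketch of what one might look like; the
-``base.TestBase`` name and the module path are assumptions based on
-:doc:`test_suite`, which places the common base class in
-``zaqar/tests/base.py``:
-
-.. code-block:: python
-
-   # Hypothetical file: zaqar/tests/unit/test_example.py
-   from zaqar.tests import base
-
-
-   class TestExample(base.TestBase):
-       """Illustrative sketch only; replace with a real scenario."""
-
-       def test_message_document_has_a_ttl(self):
-           # A real test would exercise Zaqar code; this one only
-           # demonstrates the structure of a test case.
-           message = {'ttl': 300, 'body': {'event': 'created'}}
-           self.assertIn('ttl', message)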
- - -Documenting ------------ - -You can contribute to `Zaqar's Contributor Documentation`_ which you are -currently reading and to `Zaqar's Wiki`_. - -To fix a documentation bug check the bugs marked with the ``doc`` tag in -Zaqar's bug list. In case that you want to report a documentation bug, then -don't forget to add the ``doc`` tag to it. - -`Zaqar's Contributor Documentation`_ is compiled from source files in ``.rst`` -(reStructuredText) format located in ``doc/source/`` directory in Zaqar -repository. The `"openstack-manuals" project`_ houses the documentation that is -published to ``docs.openstack.org``. - -Before contributing to `Zaqar's Contributor Documentation`_ you have to read -:doc:`first_patch` manual and `OpenStack Documentation Contributor Guide`_. - -Also, you can monitor `Ask OpenStack`_ to curate the best answers that can be -folded into the documentation. - -Designing ---------- - -Zaqar doesn't have a user interface yet. Zaqar team is working to -`integrate Zaqar to the OpenStack Dashboard (Horizon)`_. - -If you're a designer or usability professional your help will be really -appreciated. Whether it's reviewing upcoming features as a user and giving -feedback, designing features, testing designs or features with users, or -helping to build use cases and requirements, everything is useful. - -Translating ------------ - -You can translate Zaqar to language you know. -Read the `Translation wiki page`_ for more information on how OpenStack manages -translations. Zaqar has adopted Zanata, and you can use the -`OpenStack Zanata site`_ as a starting point to translate any of the OpenStack -projects, including Zaqar. It's easier to start translating directly on the -`OpenStack Zanata site`_, as there is no need to download any files or -applications to get started. - - -.. _`mailing lists` : https://wiki.openstack.org/wiki/MailingLists -.. _`OpenStack IRC wiki` : https://wiki.openstack.org/wiki/IRC -.. _`Ask OpenStack` : https://ask.openstack.org/ -.. _`Zaqar's Confirmed and Triaged bugs` : https://bugs.launchpad.net/zaqar/+bugs?field.searchtext=&orderby=-importance&search=Search&field.status%3Alist=CONFIRMED&field.status%3Alist=TRIAGED&assignee_option=any&field.assignee=&field.bug_reporter=&field.bug_commenter=&field.subscriber=&field.structural_subscriber=&field.tag=&field.tags_combinator=ANY&field.has_cve.used=&field.omit_dupes.used=&field.omit_dupes=on&field.affects_me.used=&field.has_patch.used=&field.has_branches.used=&field.has_branches=on&field.has_no_branches.used=&field.has_no_branches=on&field.has_blueprints.used=&field.has_blueprints=on&field.has_no_blueprints.used=&field.has_no_blueprints=on -.. _`Of Bugs and Statuses` : http://blog.launchpad.net/general/of-bugs-and-statuses -.. _`Bug Triage` : https://wiki.openstack.org/wiki/BugTriage -.. _`OpenStack development cycle` : https://wiki.openstack.org/wiki/ReleaseCycle -.. _`Launchpad Blueprints` : https://wiki.openstack.org/wiki/Blueprints -.. _`OpenStack Documentation Contributor Guide` : http://docs.openstack.org/contributor-guide/index.html -.. _`Zaqar's Contributor Documentation` : http://docs.openstack.org/developer/zaqar/ -.. _`Zaqar's Wiki` : https://wiki.openstack.org/wiki/Zaqar -.. _`"openstack-manuals" project` : https://wiki.openstack.org/wiki/Documentation -.. _`integrate Zaqar to the OpenStack Dashboard (Horizon)` : https://blueprints.launchpad.net/zaqar/+spec/zaqar-horizon-integration -.. _`Translation wiki page` : https://wiki.openstack.org/wiki/Translations#Translation_.26_Management -.. 
_`OpenStack Zanata site` : https://translate.openstack.org/ diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst deleted file mode 100644 index 7890fc5f..00000000 --- a/doc/source/glossary.rst +++ /dev/null @@ -1,77 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======== -Glossary -======== - -Messaging Service Concepts -========================== -The Messaging Service is a multi-tenant, message queue implementation that -utilizes a RESTful HTTP interface to provide an asynchronous communications -protocol, which is one of the main requirements in today’s scalable applications. - -.. glossary:: - - Queue - Queue is a logical entity that groups messages. Ideally a queue is created - per work type. For example, if you want to compress files, you would create - a queue dedicated for this job. Any application that reads from this queue - would only compress files. - - Message - Message is sent through a queue and exists until it is deleted by a recipient - or automatically by the system based on a TTL (time-to-live) value. - - Claim - Claim is a mechanism to mark messages so that other workers will not process the same message. - - Worker - Worker is an application that reads one or multiple messages from the queue. - - Producer - Producer is an application that creates messages in one or multiple queues. - - Publish - Subscribe - Publish - Subscribe is a pattern where all worker applications have access - to all messages in the queue. Workers can not delete or update messages. - - Producer - Consumer - Producer - Consumer is a pattern where each worker application that reads - the queue has to claim the message in order to prevent duplicate processing. - Later, when the work is done, the worker is responsible for deleting the - message. If message is not deleted in a predefined time (claim TTL), it can - be claimed by other workers. - - Message TTL - Message TTL is time-to-live value and defines how long a message will be accessible. - - Claim TTL - Claim TTL is time-to-live value and defines how long a message will be in - claimed state. A message can be claimed by one worker at a time. - - Queues Database - Queues database stores the information about the queues and the messages - within these queues. Storage layer has to guarantee durability and availability of the data. - - Pooling - If pooling enabled, queuing service uses multiple queues databases in order - to scale horizontally. A pool (queues database) can be added anytime without - stopping the service. Each pool has a weight that is assigned during the - creation time but can be changed later. Pooling is done by queue which - indicates that all messages for a particular queue can be found in the same pool (queues database). - - Catalog Database - If pooling is enabled, catalog database has to be created. Catalog database - maintains ``queues`` to ``queues database`` mapping. Storage layer has - to guarantee durability and availability of data. 
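-
-To make the Producer - Consumer pattern above concrete, here is a short
-sketch using python-zaqarclient. The exact client API is an assumption based
-on its v2 interface, and the endpoint and queue name are placeholders:
-
-.. code-block:: python
-
-   # Producer - Consumer sketch; assumes the python-zaqarclient v2 API.
-   from zaqarclient.queues.v2 import client
-
-   cli = client.Client('http://localhost:8888', version=2)
-   queue = cli.queue('file-compression')
-
-   # Producer: post a message with a five-minute TTL.
-   queue.post({'ttl': 300, 'body': {'file': 'logs/a.log'}})
-
-   def compress(path):
-       """Placeholder for the real work a worker would do."""
-       print('compressing %s' % path)
-
-   # Worker: claim messages so other workers skip them, then delete
-   # each message once the work is done. If the worker crashes, the
-   # claim TTL expires and the message becomes claimable again.
-   for message in queue.claim(ttl=60, grace=60):
-       compress(message.body['file'])
-       message.delete()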
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index b39dc190..00000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-=====================================
-Welcome to Zaqar's Documentation!
-=====================================
-
-Zaqar is a multi-tenant cloud messaging and notification service for web
-and mobile developers.
-
-The service features a REST API, which developers can use to send messages
-between various components of their SaaS and mobile applications, by using a
-variety of communication patterns. Underlying this API is an efficient
-messaging engine designed with scalability and security in mind. The Websocket
-API is also available.
-
-Other OpenStack components can integrate with Zaqar to surface events to end
-users and to communicate with guest agents that run in the "over-cloud" layer.
-
-Key features
-------------
-
-Zaqar provides the following key features:
-
-* Choice between two communication transports, both with Keystone support:
-
-  * Firewall-friendly, **HTTP-based RESTful API**. Many of today's developers
-    prefer a more web-friendly HTTP API. They value the simplicity and
-    transparency of the protocol, its firewall-friendly nature, and its huge
-    ecosystem of tools, load balancers and proxies. In addition, cloud
-    operators appreciate the scalability aspects of the REST architectural
-    style.
-  * **Websocket-based API** for persistent connections. The Websocket protocol
-    provides communication over persistent connections. Unlike HTTP, where
-    new connections are opened for each request/response pair, Websocket can
-    transfer multiple requests/responses over a single TCP connection. This
-    saves a lot of network traffic and minimizes delays.
-
-* Multi-tenant queues based on Keystone project IDs.
-* Support for several common patterns including event broadcasting, task
-  distribution, and point-to-point messaging.
-* Component-based architecture with support for custom backends and message
-  filters.
-* Efficient reference implementation with an eye toward low latency and high
-  throughput (dependent on backend).
-* Highly-available and horizontally scalable.
-* Support for subscriptions to queues. Several notification types are
-  available:
-
-  * Email notifications.
-  * Webhook notifications.
-  * Websocket notifications.
-
-Project scope
--------------
-
-The Zaqar API is data-oriented. That is, it does not provision message brokers
-and expose those directly to clients. Instead, the API acts as a bridge between
-the client and one or more backends. A provisioning service for message
-brokers, however useful, serves a somewhat different market from what Zaqar is
-targeting today. With that in mind, if users are interested in a broker
-provisioning service, the community should consider starting a new project to
-address that need.
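-
-On the wire, the data-oriented API described above is plain HTTP plus JSON.
-The following sketch posts a message to a queue; the endpoint, token, and
-queue name are placeholders, and the payload shape follows the v2 API
-reference shipped in this repository:
-
-.. code-block:: python
-
-   # Minimal sketch of posting a message through the v2 REST API.
-   import json
-   import uuid
-
-   import requests
-
-   headers = {
-       'Client-ID': str(uuid.uuid4()),  # the API requires a client UUID
-       'X-Auth-Token': 'KEYSTONE_TOKEN',  # placeholder token
-       'Content-Type': 'application/json',
-   }
-   payload = {'messages': [{'ttl': 300, 'body': {'event': 'backup.done'}}]}
-
-   resp = requests.post(
-       'http://localhost:8888/v2/queues/demo/messages',
-       headers=headers, data=json.dumps(payload))
-   print(resp.status_code, resp.json())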
- -Design principles ------------------ - -Zaqar, as with all OpenStack projects, is designed with the following -guidelines in mind: - -* **Component-based architecture.** Quickly add new behaviors -* **Highly available and scalable.** Scale to very serious workloads -* **Fault tolerant.** Isolated processes avoid cascading failures -* **Recoverable.** Failures should be easy to diagnose, debug, and rectify -* **Open standards.** Be a reference implementation for a community-driven - -Contents --------- -.. toctree:: - :maxdepth: 2 - - user/index - admin/index - install/index - contributor/index - -.. toctree:: - :maxdepth: 1 - - glossary - - - - - - - - diff --git a/doc/source/install/get_started.rst b/doc/source/install/get_started.rst deleted file mode 100644 index 8b30d02e..00000000 --- a/doc/source/install/get_started.rst +++ /dev/null @@ -1,65 +0,0 @@ -========================== -Messaging service overview -========================== - -The Message service is multi-tenant, fast, reliable, and scalable. It allows -developers to share data between distributed application components performing -different tasks, without losing messages or requiring each component to be -always available. - -The service features a RESTful API and a Websocket API, which developers can -use to send messages between various components of their SaaS and mobile -applications, by using a variety of communication patterns. - -Key features -~~~~~~~~~~~~ - -The Messaging service provides the following key features: - -* Choice between two communication transports. Both with Identity service - support: - - * Firewall-friendly, **HTTP-based RESTful API**. Many of today's developers - prefer a more web-friendly HTTP API. They value the simplicity and - transparency of the protocol, its firewall-friendly nature, and its huge - ecosystem of tools, load balancers and proxies. In addition, cloud - operators appreciate the scalability aspects of the REST architectural - style. - * **Websocket-based API** for persistent connections. Websocket protocol - provides communication over persistent connections. Unlike HTTP, where - new connections are opened for each request/response pair, Websocket can - transfer multiple requests/responses over single TCP connection. It saves - much network traffic and minimizes delays. - -* Multi-tenant queues based on Identity service IDs. -* Support for several common patterns including event broadcasting, task - distribution, and point-to-point messaging. -* Component-based architecture with support for custom back ends and message - filters. -* Efficient reference implementation with an eye toward low latency and high - throughput (dependent on back end). -* Highly-available and horizontally scalable. -* Support for subscriptions to queues. Several notification types are - available: - - * Email notifications - * Webhook notifications - * Websocket notifications - -Layers of the Messaging service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Messaging service has following layers: - -* The transport layer (Messaging application) which can provide these APIs: - - * HTTP RESTful API (via ``wsgi`` driver). - * Websocket API (via ``websocket`` driver). - -* The storage layer which keeps all the data and metadata about queues and - messages. It has two sub-layers: - - * The management store database (Catalog). Can be ``MongoDB`` database (or - ``MongoDB`` replica-set) or SQL database. - * The message store databases (Pools). 
Can be ``MongoDB`` database (or
-    ``MongoDB`` replica-set) or ``Redis`` database.
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst
deleted file mode 100644
index 89b85bbf..00000000
--- a/doc/source/install/index.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-==================
-Installation Guide
-==================
-
-.. toctree::
-
-   get_started.rst
-   install.rst
-   verify.rst
-   next-steps.rst
-
-The Messaging service is multi-tenant, fast, reliable, and scalable. It allows
-developers to share data between distributed application components performing
-different tasks, without losing messages or requiring each component to be
-always available.
-
-The service features a RESTful API and a Websocket API, which developers can
-use to send messages between various components of their SaaS and mobile
-applications, by using a variety of communication patterns.
-
-This chapter assumes a working setup of OpenStack following the base
-Installation Guide.
-
-
-Ocata
-~~~~~
-
-To install Zaqar, see the Ocata Messaging service install guide for each distribution:
-
-- `Ubuntu `__
-- `CentOS and RHEL `__
-- `openSUSE and SUSE Linux Enterprise `__
-
-Newton
-~~~~~~
-
-To install Zaqar, see the Newton Messaging service install guide for each distribution:
-
-- `Ubuntu `__
-- `CentOS and RHEL `__
-- `openSUSE and SUSE Linux Enterprise `__
diff --git a/doc/source/install/install-obs.rst b/doc/source/install/install-obs.rst
deleted file mode 100644
index 82408fdc..00000000
--- a/doc/source/install/install-obs.rst
+++ /dev/null
@@ -1,545 +0,0 @@
-.. _install-obs:
-
-Install and configure for openSUSE and SUSE Linux Enterprise
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section describes how to install and configure the Messaging service
-for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1.
-
-This section assumes that you already have a working OpenStack environment
-with at least the Identity service installed.
-
-Here you can find instructions and recommended settings for installing the
-Messaging service in a small configuration: one web server with the Messaging
-service configured to use a replica-set of three ``MongoDB`` database servers.
-Because only one web server is used, the Messaging service installed by using
-these instructions can't be considered highly available; see :doc:`install`.
-
-In this tutorial these server names are used as examples:
-
-* Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``.
-* Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``,
-  ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``.
-* Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``.
-
-Prerequisites
--------------
-
-Before you install the Messaging service, you must meet the following system
-requirements:
-
-* Installed Identity service for user and project management.
-* Python 2.7.
-
-Before you install and configure Messaging, you must create a ``MongoDB``
-replica-set of three database servers. You also need to create service
-credentials and API endpoints in Identity.
-
-#. Install and configure ``MongoDB`` replica-set on database servers:
-
-   #. Install ``MongoDB`` on the database servers:
-
-      On each database server follow the official `MongoDB installation
-      instructions`_.
-
-      .. note::
-
-         Messaging service works with ``MongoDB`` versions >= 2.4
-
-   #. Configure ``MongoDB`` on the database servers:
-
-      On each database server edit the configuration file
-      ``/etc/mongod.conf`` and modify it as needed:
-
-      ..
code-block:: ini - - # MongoDB sample configuration for Messaging service. - # (For MongoDB version >= 2.6) - # Edit according to your needs. - systemLog: - destination: file - logAppend: true - path: /var/log/mongodb/mongod.log - - storage: - dbPath: /var/lib/mongo - journal: - enabled: false - - processManagement: - fork: true # fork and run in background - pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile - - net: - port: 27017 - # bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces. - - operationProfiling: - slowOpThresholdMs: 200 - mode: slowOp - - replication: - oplogSizeMB: 2048 - replSetName: catalog - - .. note:: - - In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration - file should be written in different format. For information about - format for different versions see the official `MongoDB configuration - reference`_. - - .. warning:: - - Additional steps are required to secure ``MongoDB`` installation. You - should modify this configuration for your security requirements. See - the official `MongoDB security reference`_. - - #. Start ``MongoDB`` on the database servers: - - Start ``MongoDB`` service on all database servers: - - .. code-block:: console - - # service mongod start - - Make ``MongoDB`` service start automatically after reboot: - - .. code-block:: console - - # chkconfig mongod on - - #. Configure ``MongoDB`` Replica Set on the database servers: - - Once you've installed ``MongoDB`` on three servers and assuming that the - primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go - to ``MYDB0`` and run these commands: - - .. code-block:: console - - # mongo local --eval "printjson(rs.initiate())" - # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" - # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" - - .. note:: - - The database servers must have access to each other and also be - accessible from the Messaging service web server. Configure firewalls - on all database servers to accept incoming connections to port - ``27017`` from the needed source. - - To check if the replica-set is established see the output of this - command: - - .. code-block:: console - - # mongo local --eval "printjson(rs.status())" - -#. Source the ``admin`` credentials to gain access to admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - #. Create the ``zaqar`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt zaqar - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 7b0ffc83097148dab6ecbef6ddcc46bf | - | name | zaqar | - +-----------+----------------------------------+ - - #. Add the ``admin`` role to the ``zaqar`` user: - - .. code-block:: console - - $ openstack role add --project service --user zaqar admin - - .. note:: - - This command provides no output. - - #. Create the ``zaqar`` service entity: - - .. 
code-block:: console - - $ openstack service create --name zaqar --description "Messaging" messaging - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Messaging | - | enabled | True | - | id | b39c22818be5425ba2315dd4b10cd57c | - | name | zaqar | - | type | messaging | - +-------------+----------------------------------+ - -#. Create the Messaging service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | aabca78860e74c4db0bcb36167bfe106 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - - $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | 07f9524613de4fd3905e13a87f81fd3f | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - - $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | 686f7b19428f4b5aa1425667dfe4f49d | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - -Install and configure Messaging web server ------------------------------------------- - -Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server -``WEB0.EXAMPLE-MESSAGES.NET``. - -#. Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order - to cache Identity service tokens and catalog mappings: - - .. code-block:: console - - # zypper install memcached - - Start ``memcached`` service: - - .. code-block:: console - - # /etc/init.d/memcached start - - Make ``memcached`` service start automatically after reboot: - - .. code-block:: console - - # chkconfig memcached on - -#. Install Messaging service and ``uWSGI``: - - .. code-block:: console - - # zypper install python-pip - # git clone https://git.openstack.org/openstack/zaqar.git - # cd zaqar - # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log - # pip install --upgrade pymongo gevent uwsgi - -#. Copy the Zaqar RBAC policy sample file to the directory ``etc/zaqar/``: - - .. code-block:: console - - # mkdir - # cp etc/policy.json.sample /etc/zaqar/policy.json - -#. Create log file: - - .. 
code-block:: console - - # touch /var/log/zaqar-server.log - # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log - # chmod 600 /var/log/zaqar-server.log - - Replace ``ZAQARUSER`` with the name of the user in system under which the - Messaging service will run. - -#. Create ``/srv/zaqar`` folder to store ``uWSGI`` configuration files. - -#. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: - - .. code-block:: python - - from keystonemiddleware import auth_token - from zaqar.transport.wsgi import app - - app = auth_token.AuthProtocol(app.app, {}) - -#. Increase backlog listen limit from default (128): - - .. code-block:: console - - # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf - -#. Create ``/srv/zaqar/uwsgi.ini`` file with the following content and modify - as needed: - - .. code-block:: ini - - [uwsgi] - https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY - pidfile = /var/run/zaqar.pid - gevent = 2000 - gevent-monkey-patch = true - listen = 1024 - enable-threads = true - module = zaqar_uwsgi:app - workers = 4 - harakiri = 60 - add-header = Connection: close - - Replace ``PATH_TO_SERVER_CRT`` with path to the server's certificate - (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with path to the server's - private key (``*.key``). - - .. note:: - - The ``uWSGI`` configuration options above can be modified for different - security and performance requirements including load balancing. See the - official `uWSGI configuration reference`_. - -#. Create Messaging service's configuration file ``/etc/zaqar.conf`` with the - following content: - - .. code-block:: ini - - [DEFAULT] - # Show debugging output in logs (sets DEBUG log level output) - #debug = False - - # Pooling and admin mode configs - pooling = True - admin_mode = True - - # Log to file - log_file = /var/log/zaqar-server.log - - # This is taken care of in our custom app.py, so disable here - ;auth_strategy = keystone - - # Modify to make it work with your Identity service. - [keystone_authtoken] - project_domain_name = Default - user_domain_name = Default - project_domain_id = default - project_name = service - user_domain_id = default - # File path to a PEM encoded Certificate Authority to use when verifying - # HTTPs connections. Defaults to system CAs if commented. - cafile = PATH_TO_CA_FILE - # Messaging service user name in Identity service. - username = ZAQARIDENTITYUSER - # Messaging service password in Identity service. - password = ZAQARIDENTITYPASSWORD - # Complete public Identity API endpoint (HTTPS protocol is more preferable - # than HTTP). - auth_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 - # Complete admin Identity API endpoint (HTTPS protocol is more preferable - # than HTTP). - auth_url = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:35357 - # Token cache time in seconds. - token_cache_time = TOKEN_CACHE_TIME - memcached_servers = 127.0.0.1:11211 - - [cache] - # Dogpile.cache backend module. It is recommended that Memcache with - # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be - # used in production deployments. Small workloads (single process) - # like devstack can use the dogpile.cache.memory backend. (string - # value) - backend = dogpile.cache.memory - memcache_servers = 127.0.0.1:11211 - - [drivers] - transport = wsgi - message_store = mongodb - management_store = mongodb - - [drivers:management_store:mongodb] - # Mongodb Connection URI. 
If ssl connection enabled, then ssl_keyfile, - # ssl_certfile, ssl_cert_reqs, ssl_ca_certs options need to be set - # accordingly. - uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred - - # Name for the database on mongodb server. - database = zaqarmanagementstore - - # Number of databases across which to partition message data, in order - # to reduce writer lock %. DO NOT change this setting after initial - # deployment. It MUST remain static. Also, you should not need a large - # number of partitions to improve performance, esp. if deploying - # MongoDB on SSD storage. (integer value) - partitions = 8 - - # Uncomment any options below if needed. - - # Maximum number of times to retry a failed operation. Currently - # only used for retrying a message post. - ;max_attempts = 1000 - - # Maximum sleep interval between retries (actual sleep time - # increases linearly according to number of attempts performed). - ;max_retry_sleep = 0.1 - - # Maximum jitter interval, to be added to the sleep interval, in - # order to decrease probability that parallel requests will retry - # at the same instant. - ;max_retry_jitter = 0.005 - - # Frequency of message garbage collections, in seconds - ;gc_interval = 5 * 60 - - # Threshold of number of expired messages to reach in a given - # queue, before performing the GC. Useful for reducing frequent - # locks on the DB for non-busy queues, or for worker queues - # which process jobs quickly enough to keep the number of in- - # flight messages low. - # - # Note: The higher this number, the larger the memory-mapped DB - # files will be. - ;gc_threshold = 1000 - - [drivers:message_store:mongodb] - # This section has same set of available options as - # "[drivers:management_store:mongodb]" section. - # - # If pooling is enabled, all pools inherit values from options in these - # settings unless overridden in pool creation request. Also "uri" option - # value isn't used in case of pooling. - # - # If ssl connection enabled, then ssl_keyfile, ssl_certfile, ssl_cert_reqs, - # ssl_ca_certs options need to be set accordingly. - - # Name for the database on MondoDB server. - database = zaqarmessagestore - - [transport] - max_queues_per_page = 1000 - max_queue_metadata = 262144 - max_mesages_per_page = 10 - max_messages_post_size = 262144 - max_message_ttl = 1209600 - max_claim_ttl = 43200 - max_claim_grace = 43200 - - [signed_url] - # Secret key used to encrypt pre-signed URLs. (string value) - secret_key = SOMELONGSECRETKEY - - Edit any options as needed, especially the options with capitalized values. - -#. Create a service file for Messaging service - ``/etc/systemd/system/zaqaruwsgi.service``: - - .. code-block:: ini - - [Unit] - Description=uWSGI Zaqar - After=syslog.target - - [Service] - ExecStart=/usr/bin/uwsgi --ini /srv/zaqar/uwsgi.ini - # Requires systemd version 211 or newer - RuntimeDirectory=uwsgi - Restart=always - KillSignal=SIGQUIT - Type=notify - StandardError=syslog - NotifyAccess=all - User=ZAQARUSER - Group=ZAQARUSER - - [Install] - WantedBy=multi-user.target - - Replace ``ZAQARUSER`` with the name of the user in system under which the - Messaging service will run. 
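-
-#. Make ``systemd`` re-read its configuration so that it picks up the unit
-   file created above (this is usually required before a freshly created
-   unit can be started):
-
-   .. code-block:: console
-
-      # systemctl daemon-reload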
-
-Finalize installation
----------------------
-
-Now that you have configured the web server and the database servers, you
-need to start the service, make it start automatically with the system, and
-define the ``MongoDB`` replica-set you created as the Messaging service's
-pool.
-
-#. Start the Messaging service on the web server:
-
-   .. code-block:: console
-
-      # systemctl start zaqaruwsgi.service
-
-#. Make the Messaging service start automatically after reboot on the web
-   server:
-
-   .. code-block:: console
-
-      # systemctl enable zaqaruwsgi.service
-
-#. Configure the pool:
-
-   .. code-block:: console
-
-      # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \
-        -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \
-        -H "Client-ID: CLIENT_ID" \
-        -H "X-Auth-Token: TOKEN" \
-        -H "Content-type: application/json"
-
-   Replace ``POOL1`` with the desired pool name.
-
-   Replace ``CLIENT_ID`` with a universally unique identifier (UUID), which
-   can be generated by, for example, the ``uuidgen`` utility.
-
-   Replace ``TOKEN`` with an authentication token retrieved from the
-   Identity service. If you chose not to enable Keystone authentication,
-   you do not have to pass a token.
-
-   .. note::
-
-      The ``options`` key in the curl request above overrides any options
-      (whether set in the configuration file or left at their defaults) in
-      the ``[drivers:message_store:mongodb]`` section of the Messaging
-      service configuration file.
-
-.. tip::
-
-   In larger deployments, there should be several load-balanced web servers,
-   and the management store databases and the message store databases
-   (pools) should be on different ``MongoDB`` replica-sets.
-
-.. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-suse/
-.. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/
-.. _`MongoDB security reference`: https://docs.mongodb.org/manual/security/
-.. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/
diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst
deleted file mode 100644
index b756bee9..00000000
--- a/doc/source/install/install-rdo.rst
+++ /dev/null
@@ -1,545 +0,0 @@
-.. _install-rdo:
-
-Install and configure for Red Hat Enterprise Linux and CentOS
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section describes how to install and configure the Messaging service,
-code-named ``zaqar``, for Red Hat Enterprise Linux 7 and CentOS 7.
-
-This section assumes that you already have a working OpenStack environment
-with at least the Identity service installed.
-
-Here you can find instructions and recommended settings for installing the
-Messaging service in a small configuration: one web server with the Messaging
-service configured to use a replica-set of three ``MongoDB`` database
-servers. Because only one web server is used, a Messaging service installed
-by following these instructions cannot be considered highly available; see
-:doc:`install`.
-
-This tutorial uses these server names as examples:
-
-* Web server with the Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``.
-* Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``,
-  ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``.
-* Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``.
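-
-If these example names are not resolvable through DNS in your environment,
-one option is to map them in ``/etc/hosts`` on each server. The IP addresses
-below are placeholders, not part of this guide's setup; substitute the real
-addresses of your servers:
-
-.. code-block:: console
-
-   # cat >> /etc/hosts << EOF
-   10.0.0.10 WEB0.EXAMPLE-MESSAGES.NET
-   10.0.0.21 MYDB0.EXAMPLE-MESSAGES.NET
-   10.0.0.22 MYDB1.EXAMPLE-MESSAGES.NET
-   10.0.0.23 MYDB2.EXAMPLE-MESSAGES.NET
-   10.0.0.30 IDENTITY.EXAMPLE-MESSAGES.NET
-   EOF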
- -Prerequisites -------------- - -Before you install Messaging service, you must meet the following system -requirements: - -* Installed Identity service for user and project management. -* Python 2.7. - -Before you install and configure Messaging, you must create a ``MongoDB`` -replica-set of three database servers. Also you need to create service -credentials and API endpoints in Identity. - -#. Install and configure ``MongoDB`` replica-set on database servers: - - #. Install ``MongoDB`` on the database servers: - - On each database server follow the official `MongoDB installation - instructions`_. - - .. note:: - - Messaging service works with ``MongoDB`` versions >= 2.4 - - #. Configure ``MongoDB`` on the database servers: - - On each database server edit configuration file: ``/etc/mongod.conf`` and - modify as needed: - - .. code-block:: ini - - # MongoDB sample configuration for Messaging service. - # (For MongoDB version >= 2.6) - # Edit according to your needs. - systemLog: - destination: file - logAppend: true - path: /var/log/mongodb/mongod.log - - storage: - dbPath: /var/lib/mongo - journal: - enabled: false - - processManagement: - fork: true # fork and run in background - pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile - - net: - port: 27017 - # bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces. - - operationProfiling: - slowOpThresholdMs: 200 - mode: slowOp - - replication: - oplogSizeMB: 2048 - replSetName: catalog - - .. note:: - - In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration - file should be written in different format. For information about - format for different versions see the official `MongoDB configuration - reference`_. - - .. warning:: - - Additional steps are required to secure ``MongoDB`` installation. You - should modify this configuration for your security requirements. See - the official `MongoDB security reference`_. - - #. Start ``MongoDB`` on the database servers: - - Start ``MongoDB`` service on all database servers: - - .. code-block:: console - - # systemctl start mongod - - Make ``MongoDB`` service start automatically after reboot: - - .. code-block:: console - - # systemctl enable mongod - - #. Configure ``MongoDB`` Replica Set on the database servers: - - Once you've installed ``MongoDB`` on three servers and assuming that the - primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go - to ``MYDB0`` and run these commands: - - .. code-block:: console - - # mongo local --eval "printjson(rs.initiate())" - # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" - # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" - - .. note:: - - The database servers must have access to each other and also be - accessible from the Messaging service web server. Configure firewalls - on all database servers to accept incoming connections to port - ``27017`` from the needed source. - - To check if the replica-set is established see the output of this - command: - - .. code-block:: console - - # mongo local --eval "printjson(rs.status())" - -#. Source the ``admin`` credentials to gain access to admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - #. Create the ``zaqar`` user: - - .. 
code-block:: console - - $ openstack user create --domain default --password-prompt zaqar - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 7b0ffc83097148dab6ecbef6ddcc46bf | - | name | zaqar | - +-----------+----------------------------------+ - - #. Add the ``admin`` role to the ``zaqar`` user: - - .. code-block:: console - - $ openstack role add --project service --user zaqar admin - - .. note:: - - This command provides no output. - - #. Create the ``zaqar`` service entity: - - .. code-block:: console - - $ openstack service create --name zaqar --description "Messaging" messaging - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Messaging | - | enabled | True | - | id | b39c22818be5425ba2315dd4b10cd57c | - | name | zaqar | - | type | messaging | - +-------------+----------------------------------+ - -#. Create the Messaging service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | aabca78860e74c4db0bcb36167bfe106 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - - $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | 07f9524613de4fd3905e13a87f81fd3f | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - - $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | 686f7b19428f4b5aa1425667dfe4f49d | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - -Install and configure Messaging web server ------------------------------------------- - -Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server -``WEB0.EXAMPLE-MESSAGES.NET``. - -#. Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order - to cache Identity service tokens and catalog mappings: - - .. code-block:: console - - # yum install memcached - - Start ``memcached`` service: - - .. code-block:: console - - # systemctl start memcached - - Make ``memcached`` service start automatically after reboot: - - .. 
code-block:: console - - # systemctl enable memcached - -#. Install Messaging service and ``uWSGI``: - - .. code-block:: console - - # yum -y install python-pip - # git clone https://git.openstack.org/openstack/zaqar.git - # cd zaqar - # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log - # pip install --upgrade pymongo gevent uwsgi - -#. Copy the Zaqar RBAC policy sample file to the directory ``etc/zaqar/``: - - .. code-block:: console - - # mkdir - # cp etc/policy.json.sample /etc/zaqar/policy.json - -#. Create log file: - - .. code-block:: console - - # touch /var/log/zaqar-server.log - # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log - # chmod 600 /var/log/zaqar-server.log - - Replace ``ZAQARUSER`` with the name of the user in system under which the - Messaging service will run. - -#. Create ``/srv/zaqar`` folder to store ``uWSGI`` configuration files. - -#. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: - - .. code-block:: python - - from keystonemiddleware import auth_token - from zaqar.transport.wsgi import app - - app = auth_token.AuthProtocol(app.app, {}) - -#. Increase backlog listen limit from default (128): - - .. code-block:: console - - # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf - -#. Create ``/srv/zaqar/uwsgi.ini`` file with the following content and modify - as needed: - - .. code-block:: ini - - [uwsgi] - https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY - pidfile = /var/run/zaqar.pid - gevent = 2000 - gevent-monkey-patch = true - listen = 1024 - enable-threads = true - module = zaqar_uwsgi:app - workers = 4 - harakiri = 60 - add-header = Connection: close - - Replace ``PATH_TO_SERVER_CRT`` with path to the server's certificate - (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with path to the server's - private key (``*.key``). - - .. note:: - - The ``uWSGI`` configuration options above can be modified for different - security and performance requirements including load balancing. See the - official `uWSGI configuration reference`_. - -#. Create Messaging service's configuration file ``/etc/zaqar.conf`` with the - following content: - - .. code-block:: ini - - [DEFAULT] - # Show debugging output in logs (sets DEBUG log level output) - #debug = False - - # Pooling and admin mode configs - pooling = True - admin_mode = True - - # Log to file - log_file = /var/log/zaqar-server.log - - # This is taken care of in our custom app.py, so disable here - ;auth_strategy = keystone - - # Modify to make it work with your Identity service. - [keystone_authtoken] - project_domain_name = Default - user_domain_name = Default - project_domain_id = default - project_name = service - user_domain_id = default - # File path to a PEM encoded Certificate Authority to use when verifying - # HTTPs connections. Defaults to system CAs if commented. - cafile = PATH_TO_CA_FILE - # Messaging service user name in Identity service. - username = ZAQARIDENTITYUSER - # Messaging service password in Identity service. - password = ZAQARIDENTITYPASSWORD - # Complete public Identity API endpoint (HTTPS protocol is more preferable - # than HTTP). - auth_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 - # Complete admin Identity API endpoint (HTTPS protocol is more preferable - # than HTTP). - auth_url = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:35357 - # Token cache time in seconds. - token_cache_time = TOKEN_CACHE_TIME - memcached_servers = 127.0.0.1:11211 - - [cache] - # Dogpile.cache backend module. 
It is recommended that Memcache with - # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be - # used in production deployments. Small workloads (single process) - # like devstack can use the dogpile.cache.memory backend. (string - # value) - backend = dogpile.cache.memory - memcache_servers = 127.0.0.1:11211 - - [drivers] - transport = wsgi - message_store = mongodb - management_store = mongodb - - [drivers:management_store:mongodb] - # Mongodb Connection URI. If ssl connection enabled, then ssl_keyfile, - # ssl_certfile, ssl_cert_reqs, ssl_ca_certs options need to be set - # accordingly. - uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred - - # Name for the database on mongodb server. - database = zaqarmanagementstore - - # Number of databases across which to partition message data, in order - # to reduce writer lock %. DO NOT change this setting after initial - # deployment. It MUST remain static. Also, you should not need a large - # number of partitions to improve performance, esp. if deploying - # MongoDB on SSD storage. (integer value) - partitions = 8 - - # Uncomment any options below if needed. - - # Maximum number of times to retry a failed operation. Currently - # only used for retrying a message post. - ;max_attempts = 1000 - - # Maximum sleep interval between retries (actual sleep time - # increases linearly according to number of attempts performed). - ;max_retry_sleep = 0.1 - - # Maximum jitter interval, to be added to the sleep interval, in - # order to decrease probability that parallel requests will retry - # at the same instant. - ;max_retry_jitter = 0.005 - - # Frequency of message garbage collections, in seconds - ;gc_interval = 5 * 60 - - # Threshold of number of expired messages to reach in a given - # queue, before performing the GC. Useful for reducing frequent - # locks on the DB for non-busy queues, or for worker queues - # which process jobs quickly enough to keep the number of in- - # flight messages low. - # - # Note: The higher this number, the larger the memory-mapped DB - # files will be. - ;gc_threshold = 1000 - - [drivers:message_store:mongodb] - # This section has same set of available options as - # "[drivers:management_store:mongodb]" section. - # - # If pooling is enabled, all pools inherit values from options in these - # settings unless overridden in pool creation request. Also "uri" option - # value isn't used in case of pooling. - # - # If ssl connection enabled, then ssl_keyfile, ssl_certfile, ssl_cert_reqs, - # ssl_ca_certs options need to be set accordingly. - - # Name for the database on MondoDB server. - database = zaqarmessagestore - - [transport] - max_queues_per_page = 1000 - max_queue_metadata = 262144 - max_mesages_per_page = 10 - max_messages_post_size = 262144 - max_message_ttl = 1209600 - max_claim_ttl = 43200 - max_claim_grace = 43200 - - [signed_url] - # Secret key used to encrypt pre-signed URLs. (string value) - secret_key = SOMELONGSECRETKEY - - Edit any options as needed, especially the options with capitalized values. - -#. Create a service file for Messaging service - ``/etc/systemd/system/zaqaruwsgi.service``: - - .. 
code-block:: ini - - [Unit] - Description=uWSGI Zaqar - After=syslog.target - - [Service] - ExecStart=/usr/bin/uwsgi --ini /srv/zaqar/uwsgi.ini - # Requires systemd version 211 or newer - RuntimeDirectory=uwsgi - Restart=always - KillSignal=SIGQUIT - Type=notify - StandardError=syslog - NotifyAccess=all - User=ZAQARUSER - Group=ZAQARUSER - - [Install] - WantedBy=multi-user.target - - Replace ``ZAQARUSER`` with the name of the user in system under which the - Messaging service will run. - -Finalize installation ---------------------- - -Now after you have configured the web server and the database servers to have a -functional Messaging service, you need to start the service, make the service -automatically start with the system and define the created ``MongoDB`` -replica-set as Messaging's pool. - -#. Start Messaging service on the web server: - - .. code-block:: console - - # systemctl start zaqar.uwsgi.service - -#. Make Messaging service start automatically after reboot on the web server: - - .. code-block:: console - - # systemctl enable zaqar.uwsgi.service - -#. Configure pool: - - .. code-block:: console - - # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \ - -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \ - -H "Client-ID: CLIENT_ID" \ - -H "X-Auth-Token: TOKEN" \ - -H "Content-type: application/json" \ - - Replace ``POOL1`` variable with the desired name of a pool. - - Replace ``CLIENT_ID`` variable with the universally unique identifier (UUID) - which can be generated by, for example, ``uuidgen`` utility. - - Replace ``TOKEN`` variable with the authentication token retrieved from - Identity service. If you choose not to enable Keystone authentication you - won't have to pass a token. - - .. note:: - - The ``options`` key in curl request above overrides any options - (specified in configuration file or default) in - ``[drivers:message_store:mongodb]`` Messaging service configuration - file's section. - -.. tip:: - - In larger deployments, there should be many load balanced web servers. Also - the management store databases and the message store databases (pools) - should be on different ``MongoDB`` replica-sets. - -.. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-red-hat/ -.. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/ -.. _`MongoDB security reference`: https://docs.mongodb.org/manual/security/ -.. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/ diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst deleted file mode 100644 index 9d524d6e..00000000 --- a/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,529 +0,0 @@ -.. _install-ubuntu: - -Install and configure for Ubuntu -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Messaging service -for Ubuntu 14.04 (LTS). - -This section assumes that you already have a working OpenStack environment with -at least Identity service installed. - -Here you can find instructions and recommended settings for installing -Messaging service in small configuration: one web server with Messaging service -configured to use replica-set of three ``MongoDB`` database servers. 
Because -only one web server is used, the Messaging service installed by using these -instructions can't be considered as high available, see :doc:`install`. - -In this tutorial these server names are used as examples: - -* Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``. -* Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``, - ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``. -* Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``. - -Prerequisites -------------- - -Before you install Messaging service, you must meet the following system -requirements: - -* Installed Identity service for user and project management. -* Python 2.7. - -Before you install and configure Messaging, you must create a ``MongoDB`` -replica-set of three database servers. Also you need to create service -credentials and API endpoints in Identity. - -#. Install and configure ``MongoDB`` replica-set on database servers: - - #. Install ``MongoDB`` on the database servers: - - On each database server follow the official `MongoDB installation - instructions`_. - - .. note:: - - Messaging service works with ``MongoDB`` versions >= 2.4 - - #. Configure ``MongoDB`` on the database servers: - - On each database server edit configuration file: ``/etc/mongod.conf`` and - modify as needed: - - .. code-block:: ini - - # MongoDB sample configuration for Messaging service. - # (For MongoDB version >= 2.6) - # Edit according to your needs. - systemLog: - destination: file - logAppend: true - path: /var/log/mongodb/mongod.log - - storage: - dbPath: /var/lib/mongo - journal: - enabled: false - - processManagement: - fork: true # fork and run in background - pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile - - net: - port: 27017 - # bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces. - - operationProfiling: - slowOpThresholdMs: 200 - mode: slowOp - - replication: - oplogSizeMB: 2048 - replSetName: catalog - - .. note:: - - In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration - file should be written in different format. For information about - format for different versions see the official `MongoDB configuration - reference`_. - - .. warning:: - - Additional steps are required to secure ``MongoDB`` installation. You - should modify this configuration for your security requirements. See - the official `MongoDB security reference`_. - - #. Start ``MongoDB`` on the database servers: - - Start ``MongoDB`` service on all database servers: - - .. code-block:: console - - # service mongodb start - - #. Configure ``MongoDB`` Replica Set on the database servers: - - Once you've installed ``MongoDB`` on three servers and assuming that the - primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go - to ``MYDB0`` and run these commands: - - .. code-block:: console - - # mongo local --eval "printjson(rs.initiate())" - # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" - # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" - - .. note:: - - The database servers must have access to each other and also be - accessible from the Messaging service web server. Configure firewalls - on all database servers to accept incoming connections to port - ``27017`` from the needed source. - - To check if the replica-set is established see the output of this - command: - - .. code-block:: console - - # mongo local --eval "printjson(rs.status())" - -#. 
Source the ``admin`` credentials to gain access to admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - #. Create the ``zaqar`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt zaqar - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 7b0ffc83097148dab6ecbef6ddcc46bf | - | name | zaqar | - +-----------+----------------------------------+ - - #. Add the ``admin`` role to the ``zaqar`` user: - - .. code-block:: console - - $ openstack role add --project service --user zaqar admin - - .. note:: - - This command provides no output. - - #. Create the ``zaqar`` service entity: - - .. code-block:: console - - $ openstack service create --name zaqar --description "Messaging" messaging - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Messaging | - | enabled | True | - | id | b39c22818be5425ba2315dd4b10cd57c | - | name | zaqar | - | type | messaging | - +-------------+----------------------------------+ - -#. Create the Messaging service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | aabca78860e74c4db0bcb36167bfe106 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - - $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | 07f9524613de4fd3905e13a87f81fd3f | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - - $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 - +--------------+---------------------------------------+ - | Field | Value | - +--------------+---------------------------------------+ - | enabled | True | - | id | 686f7b19428f4b5aa1425667dfe4f49d | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | b39c22818be5425ba2315dd4b10cd57c | - | service_name | zaqar | - | service_type | messaging | - | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | - +--------------+---------------------------------------+ - -Install and configure Messaging web server ------------------------------------------- - -Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server -``WEB0.EXAMPLE-MESSAGES.NET``. - -#. Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order - to cache Identity service tokens and catalog mappings: - - .. 
code-block:: console - - # apt-get install memcached - - Start ``memcached`` service: - - .. code-block:: console - - # service memcached start - -#. Install Messaging service and ``uWSGI``: - - .. code-block:: console - - # apt-get install python-pip - # git clone https://git.openstack.org/openstack/zaqar.git - # cd zaqar - # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log - # pip install --upgrade pymongo gevent uwsgi - -#. Copy the Zaqar RBAC policy sample file to the directory ``etc/zaqar/``: - - .. code-block:: console - - # mkdir - # cp etc/policy.json.sample /etc/zaqar/policy.json - -#. Create log file: - - .. code-block:: console - - # touch /var/log/zaqar-server.log - # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log - # chmod 600 /var/log/zaqar-server.log - - Replace ``ZAQARUSER`` with the name of the user in system under which the - Messaging service will run. - -#. Create ``/srv/zaqar`` folder to store ``uWSGI`` configuration files. - -#. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: - - .. code-block:: python - - from keystonemiddleware import auth_token - from zaqar.transport.wsgi import app - - app = auth_token.AuthProtocol(app.app, {}) - -#. Increase backlog listen limit from default (128): - - .. code-block:: console - - # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf - -#. Create ``/srv/zaqar/uwsgi.ini`` file with the following content and modify - as needed: - - .. code-block:: ini - - [uwsgi] - https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY - pidfile = /var/run/zaqar.pid - gevent = 2000 - gevent-monkey-patch = true - listen = 1024 - enable-threads = true - module = zaqar_uwsgi:app - workers = 4 - harakiri = 60 - add-header = Connection: close - - Replace ``PATH_TO_SERVER_CRT`` with path to the server's certificate - (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with path to the server's - private key (``*.key``). - - .. note:: - - The ``uWSGI`` configuration options above can be modified for different - security and performance requirements including load balancing. See the - official `uWSGI configuration reference`_. - -#. Create Messaging service's configuration file ``/etc/zaqar.conf`` with the - following content: - - .. code-block:: ini - - [DEFAULT] - # Show debugging output in logs (sets DEBUG log level output) - #debug = False - - # Pooling and admin mode configs - pooling = True - admin_mode = True - - # Log to file - log_file = /var/log/zaqar-server.log - - # This is taken care of in our custom app.py, so disable here - ;auth_strategy = keystone - - # Modify to make it work with your Identity service. - [keystone_authtoken] - project_domain_name = Default - user_domain_name = Default - project_domain_id = default - project_name = service - user_domain_id = default - # File path to a PEM encoded Certificate Authority to use when verifying - # HTTPs connections. Defaults to system CAs if commented. - cafile = PATH_TO_CA_FILE - # Messaging service user name in Identity service. - username = ZAQARIDENTITYUSER - # Messaging service password in Identity service. - password = ZAQARIDENTITYPASSWORD - # Complete public Identity API endpoint (HTTPS protocol is more preferable - # than HTTP). - auth_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 - # Complete admin Identity API endpoint (HTTPS protocol is more preferable - # than HTTP). - auth_url = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:35357 - # Token cache time in seconds. 
- token_cache_time = TOKEN_CACHE_TIME - memcached_servers = 127.0.0.1:11211 - - [cache] - # Dogpile.cache backend module. It is recommended that Memcache with - # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be - # used in production deployments. Small workloads (single process) - # like devstack can use the dogpile.cache.memory backend. (string - # value) - backend = dogpile.cache.memory - memcache_servers = 127.0.0.1:11211 - - [drivers] - transport = wsgi - message_store = mongodb - management_store = mongodb - - [drivers:management_store:mongodb] - # Mongodb Connection URI. If ssl connection enabled, then ssl_keyfile, - # ssl_certfile, ssl_cert_reqs, ssl_ca_certs options need to be set - # accordingly. - uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred - - # Name for the database on mongodb server. - database = zaqarmanagementstore - - # Number of databases across which to partition message data, in order - # to reduce writer lock %. DO NOT change this setting after initial - # deployment. It MUST remain static. Also, you should not need a large - # number of partitions to improve performance, esp. if deploying - # MongoDB on SSD storage. (integer value) - partitions = 8 - - # Uncomment any options below if needed. - - # Maximum number of times to retry a failed operation. Currently - # only used for retrying a message post. - ;max_attempts = 1000 - - # Maximum sleep interval between retries (actual sleep time - # increases linearly according to number of attempts performed). - ;max_retry_sleep = 0.1 - - # Maximum jitter interval, to be added to the sleep interval, in - # order to decrease probability that parallel requests will retry - # at the same instant. - ;max_retry_jitter = 0.005 - - # Frequency of message garbage collections, in seconds - ;gc_interval = 5 * 60 - - # Threshold of number of expired messages to reach in a given - # queue, before performing the GC. Useful for reducing frequent - # locks on the DB for non-busy queues, or for worker queues - # which process jobs quickly enough to keep the number of in- - # flight messages low. - # - # Note: The higher this number, the larger the memory-mapped DB - # files will be. - ;gc_threshold = 1000 - - [drivers:message_store:mongodb] - # This section has same set of available options as - # "[drivers:management_store:mongodb]" section. - # - # If pooling is enabled, all pools inherit values from options in these - # settings unless overridden in pool creation request. Also "uri" option - # value isn't used in case of pooling. - # - # If ssl connection enabled, then ssl_keyfile, ssl_certfile, ssl_cert_reqs, - # ssl_ca_certs options need to be set accordingly. - - # Name for the database on MondoDB server. - database = zaqarmessagestore - - [transport] - max_queues_per_page = 1000 - max_queue_metadata = 262144 - max_mesages_per_page = 10 - max_messages_post_size = 262144 - max_message_ttl = 1209600 - max_claim_ttl = 43200 - max_claim_grace = 43200 - - [signed_url] - # Secret key used to encrypt pre-signed URLs. (string value) - secret_key = SOMELONGSECRETKEY - - Edit any options as needed, especially the options with capitalized values. - -#. Create an upstart config, it could be named as ``/etc/init/zaqar.conf``: - - .. 
code-block:: bash
-
-      description "Zaqar api server"
-      author "Your Name "
-
-      start on runlevel [2345]
-      stop on runlevel [!2345]
-
-      chdir /var/run
-
-      pre-start script
-          mkdir -p /var/run/zaqar
-          chown zaqar:zaqar /var/run/zaqar
-
-          mkdir -p /var/lock/zaqar
-          chown zaqar:root /var/lock/zaqar
-      end script
-
-      exec /usr/bin/uwsgi --master --ini /srv/zaqar/uwsgi.ini
-
-
-Finalize installation
----------------------
-
-Now that you have configured the web server and the database servers, you
-need to start the service, make it start automatically with the system, and
-define the ``MongoDB`` replica-set you created as the Messaging service's
-pool.
-
-#. Start the Messaging service on the web server:
-
-   .. code-block:: console
-
-      # service zaqar start
-
-#. The upstart configuration created above already makes the Messaging
-   service start automatically after reboot on the web server, via its
-   ``start on runlevel [2345]`` stanza.
-
-#. Configure the pool:
-
-   .. code-block:: console
-
-      # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \
-        -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \
-        -H "Client-ID: CLIENT_ID" \
-        -H "X-Auth-Token: TOKEN" \
-        -H "Content-type: application/json"
-
-   Replace ``POOL1`` with the desired pool name.
-
-   Replace ``CLIENT_ID`` with a universally unique identifier (UUID), which
-   can be generated by, for example, the ``uuidgen`` utility.
-
-   Replace ``TOKEN`` with an authentication token retrieved from the
-   Identity service. If you chose not to enable Keystone authentication,
-   you do not have to pass a token.
-
-   .. note::
-
-      The ``options`` key in the curl request above overrides any options
-      (whether set in the configuration file or left at their defaults) in
-      the ``[drivers:message_store:mongodb]`` section of the Messaging
-      service configuration file.
-
-.. tip::
-
-   In larger deployments, there should be several load-balanced web servers,
-   and the management store databases and the message store databases
-   (pools) should be on different ``MongoDB`` replica-sets.
-
-.. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/
-.. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/
-.. _`MongoDB security reference`: https://docs.mongodb.org/manual/security/
-.. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/
diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst
deleted file mode 100644
index c20ec2ba..00000000
--- a/doc/source/install/install.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-.. _install:
-
-Install and configure
-~~~~~~~~~~~~~~~~~~~~~
-
-This section describes how to install and configure the Messaging service,
-code-named zaqar.
-
-This section assumes that you already have a working OpenStack environment
-with at least the Identity service installed.
-
-Note that installation and configuration vary by distribution.
-
-.. toctree::
-
-   install-obs.rst
-   install-rdo.rst
-   install-ubuntu.rst
-
-Possible Minimum Scalable HA Setup
-----------------------------------
-
-A scalable HA (high availability) setup is out of scope for this chapter.
-
-For an HA setup, a load balancer has to be placed in front of the web
-servers.
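-
-As an illustration only, a load balancer in front of two such web servers
-could look like the following minimal HAProxy sketch. ``WEB1``, the
-certificate path, and the configuration-file location are assumptions for
-the example, not part of the setup described in this guide:
-
-.. code-block:: console
-
-   # cat >> /etc/haproxy/haproxy.cfg << EOF
-   frontend zaqar_api
-       bind *:8888 ssl crt /etc/haproxy/certs/zaqar.pem
-       default_backend zaqar_web
-
-   backend zaqar_web
-       balance roundrobin
-       server web0 WEB0.EXAMPLE-MESSAGES.NET:8888 check ssl verify none
-       server web1 WEB1.EXAMPLE-MESSAGES.NET:8888 check ssl verify none
-   EOF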
-
-To provide high availability with minimum administration overhead, use the
-``MongoDB`` driver for storage and the ``wsgi`` driver for transport.
-
-To have a small footprint while providing HA, you can use two web servers to
-host the application and three ``MongoDB`` servers (configured as a
-replica-set) to host the Messaging service's management store and message
-store databases. At larger scale, host the management store database and the
-message store database on different ``MongoDB`` replica-sets.
diff --git a/doc/source/install/next-steps.rst b/doc/source/install/next-steps.rst
deleted file mode 100644
index 2c4f455d..00000000
--- a/doc/source/install/next-steps.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. _next-steps:
-
-Next steps
-~~~~~~~~~~
-
-Your OpenStack environment now includes the Messaging service.
-
-To add additional services, see the
-`additional documentation on installing OpenStack `_.
diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst
deleted file mode 100644
index 536591d9..00000000
--- a/doc/source/install/verify.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-.. _verify:
-
-Verify operation
-~~~~~~~~~~~~~~~~
-
-Verify operation of the Messaging service by creating messages with the
-``curl`` utility:
-
-.. code-block:: console
-
-   $ curl -i -X POST http://ZAQAR_ENDPOINT:8888/v2/queues/samplequeue/messages \
-     -d '{"messages": [{"body": {"event": 1}, "ttl": 600}, {"body": {"event": 2}, "ttl": 600}]}' \
-     -H "Content-type: application/json" \
-     -H "Client-ID: CLIENT_ID" \
-     -H "X-Auth-Token: TOKEN"
-
-Replace ``CLIENT_ID`` with a universally unique identifier (UUID), which can
-be generated by, for example, the ``uuidgen`` utility.
-
-Replace ``TOKEN`` with an authentication token retrieved from the Identity
-service. If you chose not to enable Keystone authentication, you do not have
-to pass a token.
-
-Replace ``ZAQAR_ENDPOINT`` with the endpoint of the Messaging service.
-
-A normal response has status code 201 and looks something like this:
-
-.. code-block:: console
-
-   HTTP/1.1 201 Created
-   content-length: 135
-   content-type: application/json; charset=UTF-8
-   location: http://ZAQAR_ENDPOINT:8888/v2/queues/samplequeue/messages?ids=575f6f2515e5c87d779a9b20,575f6f2515e5c87d779a9b21
-   Connection: close
-
-   {"resources": ["/v2/queues/samplequeue/messages/575f6f2515e5c87d779a9b20", "/v2/queues/samplequeue/messages/575f6f2515e5c87d779a9b21"]}
diff --git a/doc/source/user/authentication_tokens.rst b/doc/source/user/authentication_tokens.rst
deleted file mode 100644
index 3f709949..00000000
--- a/doc/source/user/authentication_tokens.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-Generate an Authentication Token
-================================
-
-You can use `cURL `__ to try the authentication
-process in two steps: get a token, and send the token to a service.
-
-1. Get an authentication token by providing your user name and either
-   your API key or your password. For example, you can request a token by
-   providing your user name and your password:
-
-   ::
-
-       $ curl -X POST https://localhost:5000/v2.0/tokens -d '{"auth":{"passwordCredentials":{"username": "joecool", "password":"coolword"}, "tenantId":"5"}}' -H 'Content-type: application/json'
-
-   Successful authentication returns a token, which you can use as
-   evidence that your identity has already been authenticated. To use
-   the token, pass it to other services as an ``X-Auth-Token`` header.
-
-   Authentication also returns a service catalog, listing the endpoints
-   you can use for Cloud services.
-
-2. Use the authentication token to send a ``GET`` request to a service you
-   would like to use.
-
-Authentication tokens are typically valid for 24 hours. Applications
-should be designed to re-authenticate after receiving a 401
-(Unauthorized) response from a service endpoint.
-
-   **Note**
-
-   If you programmatically parse an authentication response, be aware
-   that service names are stable for the life of the particular service
-   and can be used as keys. You should also be aware that a user's
-   service catalog can include multiple uniquely-named services that
-   perform similar functions.
diff --git a/doc/source/user/getting_started.rst b/doc/source/user/getting_started.rst
deleted file mode 100644
index 543d0b5f..00000000
--- a/doc/source/user/getting_started.rst
+++ /dev/null
@@ -1,387 +0,0 @@
-=====================
-Getting Started Guide
-=====================
-
-Overview
---------
-
-The Messaging service is a RESTful API-based messaging service. It supports
-distributed web applications, and is based on the OpenStack Zaqar project.
-
-The Messaging service is a vital component of large, distributed
-web applications. You can use the Messaging service for public,
-private, and hybrid cloud environments.
-
-As you develop distributed web applications, you often have multiple
-agents set up to complete sets of tasks for those applications. These
-tasks can be anything from creating users to deleting blocks of storage.
-The Messaging service provides a simple interface that creates these tasks
-as queues, messages, and claims. The interface then posts, claims, reads,
-and deletes them as the tasks are needed and performed.
-
-The Messaging service handles the distribution of tasks, but it does not
-necessarily manage the order of the tasks. Applications handle the
-workflow at a higher level.
-
-This guide explains how to access and start using the API so that you
-can begin to use the Messaging service for your applications. It gives
-instructions for entering the necessary URLs, using cURL, to set up and
-use a basic set of Messaging service operations.
-
-Prerequisites for Running Examples
-----------------------------------
-
-In order to run the examples in this guide, you must have the following
-prerequisites:
-
-- A Cloud account
-
-- A username and password, as specified during registration
-
-- Prior knowledge of HTTP/1.1 conventions
-
-- Basic familiarity with Cloud and RESTful APIs
-
-How Messaging service Works
----------------------------
-
-Following is an overview of how the Messaging service works. For
-definitions of Messaging service terms, see the glossary below.
-
-1. You create a queue to which producers or publishers post messages.
-
-2. Workers (consumers or subscribers) claim or get a message from the
-   queue, complete the work in that message, and delete the message.
-
-   If a worker will be off-line before it completes the work in a
-   message, the worker can release the claim, or simply let the claim's
-   time to live (TTL) expire, putting the message back into the queue for
-   another worker to claim.
-
-3. Subscribers monitor the claims from these queues to track activity
-   and help troubleshoot errors.
-
-For the majority of use cases, the Messaging service is not responsible for
-the ordering of messages. However, if there is only a single producer,
-the Messaging service ensures that messages are handled in a First In,
-First Out (FIFO) order.
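-
-As a quick, concrete illustration, the produce/claim/delete cycle described
-above maps onto three HTTP calls in the v2 API. The queue name ``demo``,
-``CLIENT_ID``, ``TOKEN``, ``ZAQAR_ENDPOINT``, ``MESSAGE_ID``, and
-``CLAIM_ID`` are placeholders:
-
-::
-
-    # 1. A producer posts a message.
-    $ curl -X POST http://ZAQAR_ENDPOINT:8888/v2/queues/demo/messages \
-      -d '{"messages": [{"body": {"task": "backup"}, "ttl": 300}]}' \
-      -H "Content-type: application/json" \
-      -H "Client-ID: CLIENT_ID" -H "X-Auth-Token: TOKEN"
-
-    # 2. A worker claims the message for 60 seconds.
-    $ curl -X POST http://ZAQAR_ENDPOINT:8888/v2/queues/demo/claims \
-      -d '{"ttl": 60, "grace": 60}' \
-      -H "Content-type: application/json" \
-      -H "Client-ID: CLIENT_ID" -H "X-Auth-Token: TOKEN"
-
-    # 3. When the work is done, the worker deletes the message, passing
-    #    the claim ID returned by the previous call.
-    $ curl -X DELETE "http://ZAQAR_ENDPOINT:8888/v2/queues/demo/messages/MESSAGE_ID?claim_id=CLAIM_ID" \
-      -H "Client-ID: CLIENT_ID" -H "X-Auth-Token: TOKEN"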
- -Messaging Patterns ------------------- - -The Messaging service API supports a variety of messaging patterns -including the following: - -- Task distribution - -- Event broadcasting - -- Point-to-point messaging - -Task distribution ------------------ - -The task distribution pattern has the following characteristics: - -- A producer is programmed to send messages to a queue. - -- Multiple workers (or consumers) are programmed to monitor a queue. - -- Only one worker can claim a message so that no other worker can claim - the message and duplicate the work. - -- The worker must delete the message when work is done. - -- TTL restores a message to an unclaimed state if the worker never - finishes. - -This pattern is ideal for dispatching jobs to multiple processors. - -Event Broadcasting ------------------- - -Characteristics of the event broadcasting pattern are: - -- The publisher sends messages to a queue. - -- Multiple observers (or subscribers) get the messages in the queue. - -- Multiple observers take action on each message. - -- Observers send a marker to skip messages already seen. - -- TTL eventually deletes messages. - -This pattern is ideal for notification of events to multiple observers -at once. - -Point-to-point messaging ------------------------- - -Characteristics of the point-to-point messaging pattern are: - -- The publisher sends messages to a queue. - -- The consumer gets the messages in the queue. - -- The consumer can reply with the result of processing a message by - sending another message to the same queue (queues are duplex by - default). - -- The publisher gets replies from the queue. - -- The consumer sends a marker to skip messages already seen. - -- TTL eventually deletes messages. - -This pattern is ideal for communicating with a specific client, -especially when a reply is desired from that client. - -Messaging service Operations ----------------------------- - -This section lists all of the operations that are available in the -Messaging service API. This document uses some of the most common -operations in `OpenStack API Reference `__.. - -For details about all of the operations, see the Messaging service API v2 -Reference. 
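As a sketch of the event broadcasting pattern described above, an observer
might poll a queue without claiming anything, using the marker carried in the
'next' link to skip messages it has already seen. The endpoint and the queue
name ``events`` are hypothetical, and the link handling assumes the v2
message-listing response format.

.. code-block:: python

   import requests

   ROOT = 'http://ZAQAR_ENDPOINT:8888'          # placeholder endpoint
   HDRS = {'Client-ID': 'CLIENT_ID', 'X-Auth-Token': 'TOKEN'}

   path = '/v2/queues/events/messages?limit=10'
   while True:
       resp = requests.get(ROOT + path, headers=HDRS)
       if resp.status_code != 200:              # 204: nothing new yet
           break
       body = resp.json()
       for msg in body['messages']:
           print(msg['body'])                   # act on each message
       # Follow the 'next' link; its marker skips messages already seen.
       path = body['links'][0]['href']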
-
-Home Document
-~~~~~~~~~~~~~
-
-The following operation is available for the home document:
-
-- Get Home Document
-
-Queues
-~~~~~~
-
-The following operations are available for queues:
-
-- Create Queue
-
-- List Queues
-
-- Get Queue
-
-- Update Queue
-
-- Get Queue Stats
-
-- Delete Queue
-
-Messages
-~~~~~~~~
-
-The following operations are available for messages:
-
-- Post Message
-
-- Get Messages
-
-- Get a Specific Message
-
-- Get a Set of Messages by ID
-
-- Delete Message
-
-- Delete a Set of Messages by ID
-
-Claims
-~~~~~~
-
-The following operations are available for claims:
-
-- Claim Messages
-
-- Get Claim
-
-- Update Claim
-
-- Release Claim
-
-Subscriptions
-~~~~~~~~~~~~~
-
-The following operations are available for subscriptions:
-
-- Create Subscriptions
-
-- List Subscriptions
-
-- Get Subscription
-
-- Update Subscription
-
-- Delete Subscription
-
-
-Pools
-~~~~~
-
-The following operations are available for Pools:
-
-- Create Pools
-
-- List Pools
-
-- Get Pool
-
-- Update Pool
-
-- Delete Pool
-
-Flavors
-~~~~~~~
-
-The following operations are available for Flavors:
-
-- Create Flavors
-
-- List Flavors
-
-- Get Flavor
-
-- Update Flavors
-
-- Delete Flavors
-
-
-Health
-~~~~~~
-
-The following operations are available for Health:
-
-- Ping for basic health status
-
-- Get detailed health status
-
-
-Use Cases
----------
-
-Queuing systems are used to coordinate tasks within an application. Here
-are some examples:
-
-- **Backup**: A backup application might use a queuing system to
-  connect the actions that users do in a control panel to the
-  customer's backup agent on a server. When a customer wants to start a
-  backup, they simply choose "start backup" on a panel. Doing so causes
-  the producer to put a "startBackup" message into the queue. Every few
-  minutes, the agent on the customer's server (the worker) checks the
-  queue to see if it has any new messages to act on. The agent claims
-  the "startBackup" message and kicks off the backup on the customer's
-  server.
-
-- **Storage**: Gathering statistics for a large, distributed storage
-  system can be a long process. The storage system can use a queuing
-  system to ensure that jobs complete, even if one initially fails.
-  Since messages are not deleted until after the worker has completed
-  the job, the storage system can make sure that no job goes undone. If
-  the worker fails to complete the job, the message stays in the queue
-  to be completed by another server. For example, a worker claims a
-  message to perform a statistics job, but the job takes too long to
-  complete (meaning that it most likely failed); the claim's TTL
-  expires and the message is put back into the queue. By giving the
-  claim a TTL, applications can protect themselves from workers going
-  off-line while processing a message. After a claim's TTL expires, the
-  message is put back into the queue for another worker to claim.
-
-- **Email**: The team for an email application is constantly migrating
-  customer email from old versions to newer ones, so they develop a
-  tool to let customers do it themselves. The migrations take a long
-  time, so they cannot be done with single API calls, or by a single
-  server. When a user starts a migration job from their portal, the
-  migration tool sends messages to the queue with details of how to run
-  the migration.
A set of migration engines, the consumers in this - case, periodically check the queues for new migration tasks, claim - the messages, perform the migration, and update a database with the - migration details. This process allows a set of servers to work - together to accomplish large migrations in a timely manner. - -Following are some generic use cases for Messaging service: - -- Distribute tasks among multiple workers (transactional job queues) - -- Forward events to data collectors (transactional event queues) - -- Publish events to any number of subscribers (event broadcasting) - -- Send commands to one or more agents (point-to-point messaging or - event broadcasting) - -- Request an action or get information from a Remote Procedure Call - (RPC) agent (point-to-point messaging) - -Additional Resources --------------------- - -For more information about using the API, see the Messaging service API v2 -Reference. All you need to get started with Messaging service is the -getting started guide, the reference, and your Cloud account. - -For information about the OpenStack Zaqar API, see -`OpenStack API Reference `__. - -This API uses standard HTTP 1.1 response codes as documented at -`www.w3.org/Protocols/rfc2616/rfc2616-sec10.html `__. - -Glossary --------- - -**Claim** -The process of a worker checking out a message to perform a task. -Claiming a message prevents other workers from attempting to process the -same messages. - -**Claim TTL** -Defines how long a message will be in claimed state. A message can be -claimed by one worker at a time. - -**Consumer** -A server that claims messages from the queue. - -**Message** -A task, a notification, or any meaningful data that a producer or -publisher sends to the queue. A message exists until it is deleted by a -recipient or automatically by the system based on a TTL (time-to-live) -value. - -**Message TTL** -Defines how long a message will be accessible. - -**Producer** -A server or application that sends messages to the queue. - -**Producer - Consumer** -A pattern where each worker application that reads the queue has to -claim the message in order to prevent duplicate processing. Later, when -work is done, the worker is responsible for deleting the message. If -message is not deleted in a predefined time, it can be claimed by other -workers. - -**Publisher** -A server or application that posts messages to the queue with the intent -to distribute information or updates to multiple subscribers. - -**Publisher - Subscriber** -A pattern where all worker applications have access to all messages in -the queue. Workers cannot delete or update messages. - -**Queue** -The entity that holds messages. Ideally, a queue is created per work -type. For example, if you want to compress files, you would create a -queue dedicated to this job. Any application that reads from this queue -would only compress files. - -**Subscriber** -An observer that watches messages like an RSS feed but does not claim -any messages. - -**TTL** -Time-to-live value. - -**Worker** -A client that claims messages from the queue and performs actions based -on those messages. diff --git a/doc/source/user/headers_queue_api_working.rst b/doc/source/user/headers_queue_api_working.rst deleted file mode 100644 index e1b4baa4..00000000 --- a/doc/source/user/headers_queue_api_working.rst +++ /dev/null @@ -1,356 +0,0 @@ -Common Headers -============== - -Each request to the Message Queuing API must include certain standard -and extended HTTP headers (as shown in the following table). 
These -headers provide host, agent, authentication, and other pertinent -information to the server. The following table provides the common -headers used by the API. - -.. list-table:: - :widths: 50 50 - :header-rows: 1 - - * - Header - - Description - * - Host - - Host name of the API - * - Date - - Current date and time - * - Accept - - Media type to use. Initially, only ``application/json`` is - supported. **Note: The "Accept" header is required.** - * - Accept-Encoding - - Specifies that the agent accepts gzip-encoded response bodies - * - Content-Type - - ``application/json`` - * - Content-Length - - For ``POST`` or ``PUT`` requests, the length in bytes of the - message document being submitted - * - X-Auth-Token - - Authorization token - * - X-Project-Id - - An ID for a project to which the value of X-Auth-Token grants - access. Queues are created under this project. The project ID - is the same as the account ID (also sometimes called tenant ID). - * - Client-ID - - A UUID for each client instance. The UUID must be submitted in - its canonical form (for example, 3381af92-2b9e-11e3-b191-71861300734c). - The client generates the Client-ID once. Client-ID persists - between restarts of the client so the client should - reuse that same Client-ID. - -**Note: All message-related operations require the use of "Client-ID" in -the headers to ensure that messages are not echoed back to the client that -posted them, unless the client explicitly requests this.** - -Working with the Message Queuing API -==================================== - -This chapter contains a simple exercise with some basic Message Queuing -requests that you will commonly use. Example requests are provided in -cURL, followed by the response. - -For a complete list of operations available for Message Queuing, see :doc:`getting_started` -Each operation is fully described in the `Message Queuing API v2 -Reference `_. - -Create Queue ------------- - -The Create Queue operation creates a queue in the region of your choice. - -The body of the PUT request is empty. - -The template is as follows: - -.. code:: json - - PUT {endpoint}/queues/{queue_name} - -The ``queue_name`` parameter specifies the name to give the queue. The -name *must not* exceed 64 bytes in length and is limited to US-ASCII -letters, digits, underscores, and hyphens. - -Following are examples of a Create Queue request and response: - -.. code-block:: bash - - curl -i -X PUT https://queues.api.openstack.org/v2/queues/samplequeue \ - -H "X-Auth-Token: " \ - -H "Accept: application/json" \ - -H "X-Project-Id: " - -.. code:: json - - HTTP/1.1 201 Created - Content-Length: 0 - Location: /v2/queues/samplequeue - -Post Message ------------- - -The Post Message operation inserts one or more messages in a queue. - -You can submit up to 10 messages in a single request, but you must -encapsulate them in a collection container (an array in JSON, even for a -single message - without the JSON array, you receive an "Invalid body -request" error message). You can use the resulting value of the location -header or response body to retrieve the created messages for further -processing if needed. - -The template is as follows: - -.. code:: json - - POST {endpoint}/queues/{queue_name}/messages - -The client specifies only the body and ttl attributes for the message. -Metadata, such as id and age, is added. - -The response body contains a list of resource paths that correspond to -each message submitted in the request, in the same order as they were -submitted. 
-
-If a server-side error occurs during the processing of the submitted
-messages, a partial list is returned. The ``partial`` attribute is set
-to ``true``, and the client can retry posting the remaining messages.
-
-   **Important**
-
-   The ``partial`` attribute has been deprecated in the v1.0 API and is
-   not available in the v1.1 API. Drivers are now required to operate
-   in a transactional manner. In other words, either all messages must
-   be posted, or none of them.
-
-The ``body`` attribute specifies an arbitrary document that constitutes
-the body of the message being sent.
-
-The following rules apply for the maximum size:
-
-- The size is limited to 256 KB for the entire request body (as-is),
-  including whitespace.
-
-- The maximum size of posted messages is the maximum size of the entire
-  request document (rather than the sum of the individual message
-  ``body`` field values, as in earlier releases). On error, the
-  client is notified of how much the request exceeded the limit.
-
-The document *must* be valid JSON. (The Message Queuing service
-validates it.)
-
-The ``ttl`` attribute specifies the lifetime of the message. When the
-lifetime expires, the server deletes the message and removes it from the
-queue. Valid values are 60 through 1209600 seconds (14 days).
-
-   **Note**
-
-   The server might not actually delete the message until its age
-   reaches (ttl + 60) seconds. So there might be a delay of 60 seconds
-   after the message expires before it is deleted.
-
-The following are examples of a Post Message request and response:
-
-.. code:: bash
-
-   curl -i -X POST https://queues.api.openstack.org/v1/queues/samplequeue/messages -d \
-   '[{"ttl": 300,"body": {"event": "BackupStarted"}},{"ttl": 60,"body": {"play": "hockey"}}]' \
-   -H "Content-type: application/json" \
-   -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \
-   -H "X-Auth-Token: " \
-   -H "Accept: application/json" \
-   -H "X-Project-Id: "
-
-.. code:: json
-
-   HTTP/1.1 201 Created
-   Content-Length: 153
-   Content-Type: application/json; charset=utf-8
-   Location: /v1/queues/samplequeue/messages?ids=51ca00a0c508f154c912b85c,51ca00a0c508f154c912b85d
-
-   {"partial": false, "resources": ["/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85c", "/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85d"]}
-
-Claim Messages
---------------
-
-The Claim Messages operation claims a set of messages (up to the value
-of the ``limit`` parameter) from oldest to newest and skips any messages
-that are already claimed. If there are no messages available to claim,
-the Message Queuing service returns an HTTP ``204 No Content`` response
-code.
-
-The template is as follows:
-
-.. code:: json
-
-   POST {endpoint}/queues/{queue_name}/claims{?limit}
-   Content-Type: application/json
-
-   {
-      "ttl": {claim_ttl},
-      "grace": {message_grace}
-   }
-
-The client (worker) needs to delete the message when it has finished
-processing it. The client deletes the message before the claim expires
-to ensure that the message is processed only once. If a client needs
-more time, the Cloud Service provides the Update Claim operation to make
-changes. See the Message Queuing API v1 Reference for a description of
-this operation. As part of the delete operation, workers specify the
-claim ID (which is best done by simply using the provided href). If the
-worker does this and the claim has already expired, the server returns
-an error, notifying the worker of a possible race condition.
-This action gives the worker a chance to roll back its own
-processing of the given message because another worker can claim the
-message and process it.
-
-The age given for a claim is relative to the server's clock. The claim's
-age is useful for determining how quickly messages are getting processed
-and whether a given message's claim is about to expire.
-
-When a claim expires, it is released back to the queue for other workers
-to claim. (If the original worker failed to process the message, another
-client worker can then claim the message.)
-
-The ``limit`` parameter specifies the number of messages to claim. The
-``limit`` parameter is configurable. The default is 20. Messages are
-claimed based on the number of messages available. The server might
-claim and return fewer than the requested number of messages.
-
-The ``ttl`` attribute specifies the lifetime of the claim. While
-messages are claimed, they are not available to other workers. The value
-must be between 60 and 43200 seconds (12 hours).
-
-The ``grace`` attribute specifies the message grace period in seconds.
-Valid values are between 60 and 43200 seconds (12 hours). To deal with
-workers that have stopped responding (for up to 1209600 seconds or 14
-days, including claim lifetime), the server extends the lifetime of
-claimed messages to be at least as long as the lifetime of the claim
-itself, plus the specified grace period. If a claimed message normally
-lives longer than the grace period, its expiration is not adjusted.
-
-Following are examples of a Claim Messages request and response:
-
-.. code:: bash
-
-   curl -i -X POST https://queues.api.openstack.org/v1/queues/samplequeue/claims -d \
-   '{"ttl": 300,"grace":300}' \
-   -H "Content-type: application/json" \
-   -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \
-   -H "X-Auth-Token: " \
-   -H "Accept: application/json" \
-   -H "X-Project-Id: "
-
-.. code:: http

-   HTTP/1.1 201 Created
-   Content-Length: 164
-   Content-Type: application/json; charset=utf-8
-   Location: /v1/queues/samplequeue/claims/51ca011c821e7250f344efd6
-   X-Project-Id: 
-
-   [
-     {
-       "body": {
-         "event": "BackupStarted"
-       },
-       "age": 124,
-       "href": "\/v1\/queues\/samplequeue\/messages\/51ca00a0c508f154c912b85c?claim_id=51ca011c821e7250f344efd6",
-       "ttl": 300
-     }
-   ]
-
-Delete Message with Claim ID
-----------------------------
-
-The Delete Message operation deletes messages.
-
-The template is as follows:
-
-.. code:: http
-
-   DELETE {endpoint}/queues/{queue_name}/messages/{message_id}{?claim_id}
-
-The ``message_id`` parameter specifies the message to delete.
-
-The ``claim_id`` parameter specifies that the message is deleted only if
-it has the specified claim ID and that claim has not expired. This is
-useful for ensuring that only one worker processes any given message.
-When a worker's claim expires before it deletes a message that it has
-processed, the worker must roll back any actions it took based on that
-message because another worker can now claim and process the same
-message.
-
-Following are examples of a Delete Message request and response:
-
-.. code:: bash
-
-   curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85c?claim_id=51ca011c821e7250f344efd6 \
-   -H "Content-type: application/json" \
-   -H "X-Auth-Token: " \
-   -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \
-   -H "Accept: application/json" \
-   -H "X-Project-Id: "
-
-.. code:: http
-
-   HTTP/1.1 204 No Content
-
-Release Claim
--------------
-
-The Release Claim operation immediately releases a claim, making any
-remaining, undeleted messages associated with the claim available to
-other workers.
-
-The template is as follows:
-
-.. code:: http
-
-   DELETE {endpoint}/queues/{queue_name}/claims/{claim_id}
-
-This operation is useful when a worker is performing a graceful
-shutdown, fails to process one or more messages, or is taking longer
-than expected to process messages and wants to make the remainder of the
-messages available to other workers.
-
-Following are examples of a Release Claim request and response:
-
-.. code:: bash
-
-   curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue/claims/51ca011c821e7250f344efd6 \
-   -H "Content-type: application/json" \
-   -H "X-Auth-Token: " \
-   -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \
-   -H "Accept: application/json" \
-   -H "X-Project-Id: "
-
-.. code:: http
-
-   HTTP/1.1 204 No Content
-
-Delete Queue
-------------
-
-The Delete Queue operation immediately deletes a queue and all of its
-existing messages.
-
-The template is as follows:
-
-.. code:: http
-
-   DELETE {endpoint}/queues/{queue_name}
-
-Following are examples of a Delete Queue request and response:
-
-.. code:: bash
-
-   curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue \
-   -H "Content-type: application/json" \
-   -H "X-Auth-Token: " \
-   -H "Accept: application/json" \
-   -H "X-Project-Id: "
-
-.. code:: http
-
-   HTTP/1.1 204 No Content
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
deleted file mode 100644
index 546f68ae..00000000
--- a/doc/source/user/index.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-==========
-User Guide
-==========
-
-.. toctree::
-   :maxdepth: 2
-
-   getting_started
-   send_request_api
-   authentication_tokens
-   headers_queue_api_working
diff --git a/doc/source/user/send_request_api.rst b/doc/source/user/send_request_api.rst
deleted file mode 100644
index 22e35a40..00000000
--- a/doc/source/user/send_request_api.rst
+++ /dev/null
@@ -1,89 +0,0 @@
-Send Requests to the API
-========================
-
-You have several options for sending requests through an API:
-
-- Developers and testers may prefer to use cURL, the command-line tool
-  from http://curl.haxx.se/.
-
-  With cURL you can send HTTP requests and receive responses back from
-  the command line.
-
-- If you prefer a more graphical interface, the REST client for
-  Firefox also works well for testing and trying out commands; see
-  https://addons.mozilla.org/en-US/firefox/addon/restclient/.
-
-- You can also download and install rest-client, a Java application to
-  test RESTful web services, from
-  https://github.com/wiztools/rest-client.
-
-Sending API Requests Using cURL
--------------------------------
-
-cURL is a command-line tool that is available in UNIX® system-based
-environments and Apple Mac OS X® systems, and can be downloaded for
-Microsoft Windows® to interact with the REST interfaces. For more
-information about cURL, visit http://curl.haxx.se/.
-
-cURL enables you to transmit and receive HTTP requests and responses
-from the command line or from within a shell script. As a result, you
-can work with the REST API directly without using one of the client
-APIs.
-
-The following cURL command-line options are used in this guide to run
-the examples.
-
-.. list-table::
-   :widths: 50 50
-   :header-rows: 1
-
-   * - Option
-     - Description
-   * - ``-d``
-     - Sends the specified data in a ``POST`` request to the HTTP server.
-   * - ``-i``
-     - Includes the HTTP header in the output.
-   * - ``-H HEADER``
-     - Specifies an HTTP header in the request.
-   * - ``-X``
-     - Specifies the request method to use when communicating with the
-       HTTP server. The specified request is used instead of the default
-       method, which is GET. For example, ``-X PUT`` specifies to use
-       the ``PUT`` request method.
-
-**Note** If you have the tools, you can run the cURL JSON request examples
-with the following options to format the output from cURL:
-`` | python -mjson.tool``.
-
-Copying and Pasting cURL Request Examples into a Terminal Window
-----------------------------------------------------------------
-
-To run the cURL request examples shown in this guide on Linux or Mac
-systems, perform the following actions:
-
-1. Copy and paste each example from the HTML version of this guide into
-   an ASCII text editor (for example, vi or TextEdit). You can click on
-   the small document icon to the right of each request example to
-   select it.
-
-2. Modify each example with your required account information and so
-   forth, as detailed in this guide.
-
-3. After you are finished modifying the text for the cURL request
-   example with your information (for example, ``your_username``
-   and ``your_api_key``), paste it into your terminal window.
-
-4. Press Enter to run the cURL command.
-
-   **Note**
-
-   The carriage returns in the cURL request examples that are part of
-   the cURL syntax are escaped with a backslash (\\) in order to avoid
-   prematurely terminating the command. However, you should not escape
-   carriage returns inside the JSON message within the command.
-
-   **Tip**
-
-   If you have trouble copying and pasting the examples as described,
-   try typing the entire example on one long line, removing all the
-   backslash line continuation characters.
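If you would rather script the examples than paste curl commands, the same
request can be issued from Python. The following is a minimal sketch with the
``requests`` library, mirroring the curl options above (``-X`` becomes the
method, each ``-H`` an entry in the headers dict, ``-d`` the request body);
the credentials are placeholders.

.. code-block:: python

   import json
   import requests

   # Equivalent of: curl -i -X POST <url> -d '<json>' -H "Content-type: ..."
   url = 'https://queues.api.openstack.org/v1/queues/samplequeue/messages'
   headers = {'Content-type': 'application/json',
              'Client-ID': 'CLIENT_ID',             # placeholders
              'X-Auth-Token': 'TOKEN',
              'X-Project-Id': 'PROJECT_ID'}
   data = [{'ttl': 300, 'body': {'event': 'BackupStarted'}}]

   resp = requests.post(url, headers=headers, data=json.dumps(data))
   print(resp.status_code, dict(resp.headers))      # the -i information
   print(json.dumps(resp.json(), indent=2))         # like python -mjson.tool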
diff --git a/dox.yml b/dox.yml deleted file mode 100644 index 42b73924..00000000 --- a/dox.yml +++ /dev/null @@ -1,12 +0,0 @@ -images: - - infra/trusty -add: - - requirements.txt - - test-requirements.txt -prep: - - apt-get install -y python-dev mongodb-server python-pymongo pkg-config redis-server python-redis libxml2-dev libxslt-dev lib32z1-dev - - pip install -U -r requirements.txt -r test-requirements.txt - - service mongodb start - - service redis-server start - - export ZAQAR_TEST_EVERYTHING=1p -commands: python setup.py testr --slowest diff --git a/etc/logging.conf.sample b/etc/logging.conf.sample deleted file mode 100644 index 54d75b19..00000000 --- a/etc/logging.conf.sample +++ /dev/null @@ -1,49 +0,0 @@ -[loggers] -keys=root,server,combined - -[formatters] -keys=normal,normal_with_name,debug - -[handlers] -keys=production,file,devel - -[logger_root] -level=NOTSET -handlers=devel - -[logger_server] -level=DEBUG -handlers=devel -qualname=zaqar-server - -[logger_combined] -level=DEBUG -handlers=devel -qualname=zaqar-combined - -[handler_production] -class=handlers.SysLogHandler -level=ERROR -formatter=normal_with_name -args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) - -[handler_file] -class=FileHandler -level=DEBUG -formatter=normal_with_name -args=('zaqar.log', 'w') - -[handler_devel] -class=StreamHandler -level=NOTSET -formatter=debug -args=(sys.stdout,) - -[formatter_normal] -format=%(asctime)s %(levelname)s %(message)s - -[formatter_normal_with_name] -format=(%(name)s): %(asctime)s %(levelname)s %(message)s - -[formatter_debug] -format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/etc/oslo-config-generator/zaqar.conf b/etc/oslo-config-generator/zaqar.conf deleted file mode 100644 index a5caa8c0..00000000 --- a/etc/oslo-config-generator/zaqar.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -output_file = etc/zaqar.conf.sample -namespace = zaqar.bootstrap -namespace = zaqar.common.auth -namespace = zaqar.common.configs -namespace = zaqar.storage.pipeline -namespace = zaqar.storage.pooling -namespace = zaqar.storage.mongodb -namespace = zaqar.storage.redis -namespace = zaqar.storage.sqlalchemy -namespace = zaqar.storage.swift -namespace = zaqar.transport.wsgi -namespace = zaqar.transport.base -namespace = zaqar.transport.validation -namespace = keystonemiddleware.auth_token -namespace = oslo.cache -namespace = oslo.log -namespace = oslo.messaging -namespace = oslo.middleware.cors -namespace = osprofiler -namespace = oslo.policy -namespace = oslo.reports diff --git a/etc/policy.json.sample b/etc/policy.json.sample deleted file mode 100644 index 83a6bd5d..00000000 --- a/etc/policy.json.sample +++ /dev/null @@ -1,48 +0,0 @@ -{ - "context_is_admin": "role:admin", - "admin_or_owner": "is_admin:True or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - - "queues:get_all": "", - "queues:create": "", - "queues:get": "", - "queues:delete": "", - "queues:update": "", - "queues:stats": "", - "queues:share": "", - "queues:purge": "", - - "messages:get_all": "", - "messages:create": "", - "messages:get": "", - "messages:delete": "", - "messages:delete_all": "", - - "claims:get_all": "", - "claims:create": "", - "claims:get": "", - "claims:delete": "", - "claims:update": "", - - "subscription:get_all": "", - "subscription:create": "", - "subscription:get": "", - "subscription:delete": "", - "subscription:update": "", - "subscription:confirm": "", - - "pools:get_all": "rule:context_is_admin", - "pools:create": 
"rule:context_is_admin", - "pools:get": "rule:context_is_admin", - "pools:delete": "rule:context_is_admin", - "pools:update": "rule:context_is_admin", - - "flavors:get_all": "", - "flavors:create": "rule:context_is_admin", - "flavors:get": "", - "flavors:delete": "rule:context_is_admin", - "flavors:update": "rule:context_is_admin", - - "ping:get": "", - "health:get": "rule:context_is_admin" -} diff --git a/etc/uwsgi.conf b/etc/uwsgi.conf deleted file mode 100644 index f520435d..00000000 --- a/etc/uwsgi.conf +++ /dev/null @@ -1,7 +0,0 @@ -[uwsgi] -strict = true -http = :8888 -processes = 1 -threads = 4 -wsgi-file = /opt/stack/zaqar/zaqar/transport/wsgi/app.py -callable = app diff --git a/etc/zaqar-benchmark-messages.json b/etc/zaqar-benchmark-messages.json deleted file mode 100644 index 97f90928..00000000 --- a/etc/zaqar-benchmark-messages.json +++ /dev/null @@ -1,72 +0,0 @@ -[ - { - "weight": 0.8, - "doc": { - "ttl": 60, - "body": { - "id": "7FA23C90-62F7-40D2-9360-FBD5D7D61CD1", - "evt": "Wakeup" - } - } - }, - { - "weight": 0.1, - "doc": { - "ttl": 3600, - "body": { - "ResultSet": { - "totalResultsAvailable": 1827221, - "totalResultsReturned": 2, - "firstResultPosition": 1, - "Result": [ - { - "Title": "potato jpg", - "Summary": "Kentang Si bungsu dari keluarga Solanum tuberosum L ini ternyata memiliki khasiat untuk mengurangi kerutan jerawat bintik hitam dan kemerahan pada kulit Gunakan seminggu sekali sebagai", - "Url": "http://www.mediaindonesia.com/spaw/uploads/images/potato.jpg", - "ClickUrl": "http://www.mediaindonesia.com/spaw/uploads/images/potato.jpg", - "RefererUrl": "http://www.mediaindonesia.com/mediaperempuan/index.php?ar_id=Nzkw", - "FileSize": 22630, - "FileFormat": "jpeg", - "Height": 362, - "Width": 532, - "Thumbnail": { - "Url": "http://thm-a01.yimg.com/nimage/557094559c18f16a", - "Height": 98, - "Width": 145 - } - }, - { - "Title": "potato jpg", - "Summary": "Introduction of puneri aloo This is a traditional potato preparation flavoured with curry leaves and peanuts and can be eaten on fasting day Preparation time 10 min", - "Url": "http://www.infovisual.info/01/photo/potato.jpg", - "ClickUrl": "http://www.infovisual.info/01/photo/potato.jpg", - "RefererUrl": "http://sundayfood.com/puneri-aloo-indian-%20recipe", - "FileSize": 119398, - "FileFormat": "jpeg", - "Height": 685, - "Width": 1024, - "Thumbnail": { - "Url": "http://thm-a01.yimg.com/nimage/7fa23212efe84b64", - "Height": 107, - "Width": 160 - } - } - ] - } - } - } - }, - { - "weight": 0.1, - "doc": { - "ttl": 360, - "body": { - "id": "7FA23C90-62F7-40D2-9360-FBD5D7D61CD1", - "evt": "StartBackup", - "files": [ - "/foo/bar/stuff/thing.dat" - ] - } - } - } -] diff --git a/etc/zaqar-benchmark.conf.sample b/etc/zaqar-benchmark.conf.sample deleted file mode 100644 index 6e190d4b..00000000 --- a/etc/zaqar-benchmark.conf.sample +++ /dev/null @@ -1,5 +0,0 @@ -[DEFAULT] -# verbose = False -# server_url = http://localhost:8888 -# messages_path = some/path/to/messages.json -# queue_prefix = ogre-test-queue- diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index 04e72b61..00000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,29 +0,0 @@ -======================= -Rally job related files -======================= - -This directory contains rally tasks and plugins that are run by OpenStack CI. - -Structure: - -* zaqar-zaqar.yaml is rally task that will be run in gates - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is plugin. 
Benchmark context, Benchmark scenario, SLA checks, Generic
-  cleanup resources, and so on.
-
-* extra - all files from this directory will be copied to the gates, so
-  you can use absolute paths in rally tasks.
-  Files will be in ~/.rally/extra/*
-
-
-Useful links:
-
-* More about rally: https://rally.readthedocs.org/en/latest/
-
-* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
-
-* About plugins: https://rally.readthedocs.org/en/latest/plugins.html
-
-* Plugin samples: https://git.openstack.org/cgit/openstack/rally/tree/samples/plugins
-
diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst
deleted file mode 100644
index da345f25..00000000
--- a/rally-jobs/extra/README.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-===========
-Extra files
-===========
-
-All files from this directory will be copied to the gates, so you can use
-absolute paths in rally tasks. Files will be in ~/.rally/extra/*
-
diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst
deleted file mode 100644
index fe98dfd6..00000000
--- a/rally-jobs/plugins/README.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-=============
-Rally plugins
-=============
-
-
-All *.py modules from this directory will be auto-loaded by Rally, and all
-plugins will be discoverable. There is no need for any extra configuration,
-and there is no difference between writing them here and in the Rally code
-base.
-
-Note that it is better to push all interesting and useful benchmarks to the
-Rally code base, because that greatly simplifies life for operators.
-
diff --git a/rally-jobs/plugins/__init__.py b/rally-jobs/plugins/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally-jobs/zaqar-zaqar.yaml b/rally-jobs/zaqar-zaqar.yaml
deleted file mode 100644
index 8077a38b..00000000
--- a/rally-jobs/zaqar-zaqar.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-  ZaqarBasic.create_queue:
-    -
-      args:
-        name_length: 10
-      runner:
-        type: "rps"
-        times: 1000
-        rps: 50
-      sla:
-        failure_rate:
-          max: 0
diff --git a/releasenotes/notes/.gitignore b/releasenotes/notes/.gitignore
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml b/releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml
deleted file mode 100644
index dee43ea9..00000000
--- a/releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - OSprofiler, an oslo library, is integrated with Zaqar in Ocata. It helps
-    analyze performance bottlenecks by making it possible to generate one
-    trace per request, covering all involved services, and to build a tree
-    of calls.
diff --git a/releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml b/releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml
deleted file mode 100644
index 1f6ddb50..00000000
--- a/releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
-  - Add a new webhook notifier using trust authentication. When using the
-    'trust+' URL prefix, Zaqar will create a Keystone trust for the user, and
-    then use it when a notification happens to authenticate against Keystone
-    and send the token to the endpoint.
-  - Support 'post_data' and 'post_headers' options on subscribers, allowing
-    customization of the payload for webhook subscribers.
-    The 'post_data' option supports the '$zaqar_message$' string template,
-    which will be replaced by the serialized JSON message if specified.
diff --git a/releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml b/releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml
deleted file mode 100644
index 846618ba..00000000
--- a/releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - The new Swift storage backend is added to Zaqar in Ocata. It is currently
-    experimental. To use this backend, you should modify the "drivers"
-    section in the config file.
-    [Blueprint `swift-storage-driver `_]
diff --git a/releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml b/releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml
deleted file mode 100644
index b3154223..00000000
--- a/releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - Add two configuration options for the notification endpoint of the
-    websocket server, instead of using a random port and local address:
-    'notification-bind', the address on which the notification server will
-    listen, and 'notification-port', the port on which the notification
-    server will listen.
diff --git a/releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml b/releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml
deleted file mode 100644
index 027ea925..00000000
--- a/releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-deprecations:
-  - Zaqar API v2 has been released for several cycles and it is integrated
-    as the default API version by most of the OpenStack services. So it is
-    time to deprecate v1.1 in favor of v2. As of the Newton cycle, Zaqar API
-    v1.1 is officially deprecated.
\ No newline at end of file
diff --git a/releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml b/releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml
deleted file mode 100644
index c5db8e4e..00000000
--- a/releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - Zaqar didn't return the reserved metadata when listing queues in detail.
-    After this fix, Zaqar returns the reserved metadata
-    '_default_message_ttl' and '_max_messages_post_size' in the response of
-    a detailed queue listing.
diff --git a/releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml b/releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml
deleted file mode 100644
index f98f8dfb..00000000
--- a/releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - |
-    When accessing the root path of the Zaqar service (for example,
-    curl GET http://127.0.0.1:8888/), the user saw a 401 error, which
-    caused some front-end proxies (like HAProxy) to complain. This issue
-    has now been fixed.
diff --git a/releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml b/releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml
deleted file mode 100644
index 6d8482da..00000000
--- a/releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - Query for all subscriptions on a given queue by taking into account the
-    returned marker, if any. Without this fix, only 10 subscriptions could
-    be extracted from the database for sending notifications.
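A hypothetical subscription combining both features from the note above might
look as follows; the endpoint, queue name, and webhook URL are illustrative
only, and the call assumes the v2 subscriptions API.

.. code-block:: python

   import requests

   ROOT = 'http://ZAQAR_ENDPOINT:8888'              # placeholder endpoint
   HDRS = {'Client-ID': 'CLIENT_ID', 'X-Auth-Token': 'TOKEN'}

   sub = {
       # 'trust+' makes Zaqar create a Keystone trust and use it to
       # authenticate when it delivers notifications to the endpoint.
       'subscriber': 'trust+http://example.com/hook',  # hypothetical endpoint
       'ttl': 3600,
       'options': {
           # '$zaqar_message$' is replaced with the serialized JSON message.
           'post_data': '{"payload": "$zaqar_message$"}',
           'post_headers': {'X-Custom-Header': 'value'},
       },
   }
   requests.post(ROOT + '/v2/queues/jobs/subscriptions',
                 headers=HDRS, json=sub)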
diff --git a/releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml b/releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml
deleted file mode 100644
index 3ffd681e..00000000
--- a/releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Introduce Guru to Zaqar. Guru is a mechanism whereby developers and
-    system administrators can generate a report about the state of a running
-    Zaqar executable. This report is called a *Guru Meditation Report*. Guru
-    now supports all three modes: wsgi, websocket, and uwsgi.
diff --git a/releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml b/releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml
deleted file mode 100644
index 2d19cec1..00000000
--- a/releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - Queues are now lazy in subscriptions as well, so there is no need for
-    the user to pre-create a queue before creating a subscription for that
-    queue. Zaqar creates the queue automatically on the subscription
-    creation request. As before, all subscriptions continue to stay active
-    even if the corresponding queue was deleted.
-
diff --git a/releasenotes/notes/purge-queue-6788a249ee59d55a.yaml b/releasenotes/notes/purge-queue-6788a249ee59d55a.yaml
deleted file mode 100644
index f98d89ca..00000000
--- a/releasenotes/notes/purge-queue-6788a249ee59d55a.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-features:
-  - A new queue action is added so that users can purge a queue
-    quickly. That means all the messages and subscriptions will be deleted
-    automatically but the metadata of the queue will be kept.
diff --git a/releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml b/releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml
deleted file mode 100644
index 844acf0b..00000000
--- a/releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - Zaqar now supports more built-in/reserved attributes in a queue. For now
-    there are two important attributes: 'max_messages_post_size' and
-    'max_message_ttl'. With this feature, when users query queues, Zaqar
-    shows these two attributes (read from the config file if there is no
-    customized value from the user) in the queue metadata, so that users
-    know what the values are.
-
diff --git a/releasenotes/notes/sql_init-c9b3883241631f24.yaml b/releasenotes/notes/sql_init-c9b3883241631f24.yaml
deleted file mode 100644
index de119065..00000000
--- a/releasenotes/notes/sql_init-c9b3883241631f24.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-critical:
-  - |
-    When using the sqlalchemy driver, operators now are required to run
-    "zaqar-sql-db-manage upgrade" before making the service available. The
-    service previously tried to create the database on the first request,
-    but it was bound to race conditions.
diff --git a/releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml b/releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml
deleted file mode 100644
index 8df730ef..00000000
--- a/releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - Add migration support for Zaqar's sqlalchemy storage driver.
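A sketch of the purge action described above, assuming the v2 REST API and a
hypothetical queue named ``jobs``; the endpoint and headers are placeholders.

.. code-block:: python

   import requests

   ROOT = 'http://ZAQAR_ENDPOINT:8888'              # placeholder endpoint
   HDRS = {'Client-ID': 'CLIENT_ID', 'X-Auth-Token': 'TOKEN'}

   # Remove all messages and subscriptions; queue metadata is preserved.
   requests.post(ROOT + '/v2/queues/jobs/purge', headers=HDRS,
                 json={'resource_types': ['messages', 'subscriptions']})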
diff --git a/releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml b/releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml
deleted file mode 100644
index fffc24a4..00000000
--- a/releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-features:
-  - This feature is the third part of the subscription confirmation feature.
-    It adds support for sending an email to the subscriber if confirmation
-    is needed. To use this feature, users need to set the config options
-    "external_confirmation_url", "subscription_confirmation_email_template"
-    and "unsubscribe_confirmation_email_template".
-    The confirmation page URL is used in the email subscription confirmation
-    before notification. This page is not hosted by the Zaqar server, so
-    users should build their own web service to provide it.
-    The subscription_confirmation_email_template option lets users customize
-    the subscription confirmation email content, including topic, body and
-    sender. The unsubscribe_confirmation_email_template option likewise lets
-    users customize the unsubscribe confirmation email content.
diff --git a/releasenotes/notes/support-cors-af8349382a44aa0d.yaml b/releasenotes/notes/support-cors-af8349382a44aa0d.yaml
deleted file mode 100644
index d4663153..00000000
--- a/releasenotes/notes/support-cors-af8349382a44aa0d.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - Zaqar now supports Cross-Origin Resource Sharing (CORS).
\ No newline at end of file
diff --git a/releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml b/releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml
deleted file mode 100644
index 96ee69e7..00000000
--- a/releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - Support the dot character in a queue's name, like 'service.test_queue'.
diff --git a/releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml b/releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml
deleted file mode 100644
index 8d332ab6..00000000
--- a/releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - Currently, the v1 API is still accessible though it has been deprecated
-    for a while, and we're going to deprecate v1.1 soon. To keep backward
-    compatibility, a new config option - ``enable_deprecated_api_versions`` -
-    is added so that operators can turn off an API version entirely, or keep
-    supporting it by adding that version to the list in the new config
-    option.
diff --git a/releasenotes/notes/support_dead_letter_queue_for_mongodb-c8b7303319e7f920.yaml b/releasenotes/notes/support_dead_letter_queue_for_mongodb-c8b7303319e7f920.yaml
deleted file mode 100644
index a3407144..00000000
--- a/releasenotes/notes/support_dead_letter_queue_for_mongodb-c8b7303319e7f920.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    Support for a dead letter queue is added for MongoDB. With this feature,
-    a message is moved to the specified dead letter queue if it is claimed
-    many times but still cannot be successfully processed by a client. New
-    reserved metadata keys of a queue are added: _max_claim_count,
-    _dead_letter_queue and _dead_letter_queue_messages_ttl.
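A sketch of how these reserved keys might be set when creating a queue,
assuming the v2 REST API; the endpoint, queue names, and values are
illustrative only.

.. code-block:: python

   import requests

   ROOT = 'http://ZAQAR_ENDPOINT:8888'              # placeholder endpoint
   HDRS = {'Client-ID': 'CLIENT_ID', 'X-Auth-Token': 'TOKEN'}

   # The keys are the reserved metadata from the note above.
   requests.put(ROOT + '/v2/queues/jobs', headers=HDRS,
                json={'_max_claim_count': 3,
                      '_dead_letter_queue': 'jobs-dlq',
                      '_dead_letter_queue_messages_ttl': 3600})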
diff --git a/releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml b/releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml deleted file mode 100644 index 3d1f22cd..00000000 --- a/releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - In IPv6 management network environment, starting Zaqar server will - run into 'Address family for hostname not support' error when use WSGI - simple server. The root cause is that Python's TCPServer implementation - is hard-coded to use IPv4, even in IPv6 environments. Now this issue has - been fixed. diff --git a/releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml b/releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml deleted file mode 100644 index 05e19000..00000000 --- a/releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - Now before users send messages to subscribers through a queue, the - subscribers should be confirmed first. Zaqar only sends messages to the - confirmed subscribers. This feature supports "webhook" and "mailto" - subscribers with mongoDB or redis backend. The "mailto" part will be done - in O cycle. Set "require_confirmation = True" to enable this feature. The - default value is "False" now and we will enable it by default after one or - two cycles. diff --git a/releasenotes/source/_static/.gitignore b/releasenotes/source/_static/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.gitignore b/releasenotes/source/_templates/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 18dda4be..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,278 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'reno.sphinxext', - 'openstackdocstheme', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. 
-source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Zaqar Release Notes' -copyright = u'2015, Zaqar Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from zaqar.version import version_info as zaqar_version -# The full version, including alpha/beta/rc tags. -release = zaqar_version.version_string_with_vcs() -# The short X.Y version. -version = zaqar_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. 
-# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'ZaqarReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'ZaqarReleaseNotes.tex', u'Zaqar Release Notes Documentation', - u'Zaqar Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'zaqarreleasenotes', u'Zaqar Release Notes Documentation', - [u'Zaqar Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'ZaqarReleaseNotes', u'Zaqar Release Notes Documentation', - u'Zaqar Developers', 'ZaqarReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] - -# -- Options for openstackdocstheme ------------------------------------------- -repository_name = 'openstack/zaqar' -bug_project = 'zaqar' -bug_tag = '' diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 7e355333..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -====================== - Zaqar Release Notes -====================== - -.. toctree:: - :maxdepth: 1 - - unreleased - ocata - newton - mitaka - liberty diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 08cd4ee6..00000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Liberty Series Release Notes -============================== - -.. release-notes:: - :branch: origin/stable/liberty \ No newline at end of file diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index e5456096..00000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Mitaka Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 97036ed2..00000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Newton Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42..00000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aabc..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 6055e162..00000000 --- a/requirements.txt +++ /dev/null @@ -1,34 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
-pbr!=2.1.0,>=2.0.0 # Apache-2.0 - -alembic>=0.8.10 # MIT -Babel!=2.4.0,>=2.3.4 # BSD -falcon>=1.0.0 # Apache-2.0 -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -iso8601>=0.1.11 # MIT -keystonemiddleware>=4.12.0 # Apache-2.0 -msgpack-python>=0.4.0 # Apache-2.0 -python-memcached>=1.56 # PSF -WebOb>=1.7.1 # MIT -stevedore>=1.20.0 # Apache-2.0 -six>=1.9.0 # MIT -oslo.cache>=1.5.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.reports>=0.6.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 -osprofiler>=1.4.0 # Apache-2.0 -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT -enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD -trollius>=1.0 # Apache-2.0 -autobahn>=0.10.1 # MIT License -requests>=2.14.2 # Apache-2.0 -futurist!=0.15.0,>=0.11.0 # Apache-2.0 diff --git a/samples/html/confirmation_web_service_sample.py b/samples/html/confirmation_web_service_sample.py deleted file mode 100644 index 0470eef5..00000000 --- a/samples/html/confirmation_web_service_sample.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -import json -import logging -from oslo_utils import uuidutils -import requests -import sys - -try: - import SimpleHTTPServer - import SocketServer -except Exception: - from http import server as SimpleHTTPServer - import socketserver as SocketServer - - -if len(sys.argv) > 2: - PORT = int(sys.argv[2]) -elif len(sys.argv) > 1: - PORT = int(sys.argv[1]) -else: - PORT = 5678 - - -class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): - """This is the sample service for email subscription confirmation. 
- - """ - - def do_OPTIONS(self): - logging.warning('=================== OPTIONS =====================') - self.send_response(200) - self.send_header('Access-Control-Allow-Origin', self.headers['origin']) - self.send_header('Access-Control-Allow-Methods', 'PUT') - self.send_header('Access-Control-Allow-Headers', - 'client-id,confirmation-url,content-type,url-expires,' - 'url-methods,url-paths,url-signature,x-project-id,' - 'confirm') - self.end_headers() - logging.warning(self.headers) - return - - def do_PUT(self): - logging.warning('=================== PUT =====================') - self._send_confirm_request() - self.send_response(200) - self.send_header('Access-Control-Allow-Origin', self.headers['origin']) - self.end_headers() - message = "{\"message\": \"ok\"}" - self.wfile.write(message) - logging.warning(self.headers) - return - - def _send_confirm_request(self): - url = self.headers['confirmation-url'] - confirmed_value = True - try: - if self.headers['confirm'] == "false": - confirmed_value = False - except KeyError: - pass - headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json', - 'X-Project-ID': self.headers['x-project-id'], - 'Client-ID': uuidutils.generate_uuid(), - 'URL-Methods': self.headers['url-methods'], - 'URL-Signature': self.headers['url-signature'], - 'URL-Paths': self.headers['url-paths'], - 'URL-Expires': self.headers['url-expires'], - } - data = {'confirmed': confirmed_value} - requests.put(url=url, data=json.dumps(data), headers=headers) - -Handler = ServerHandler -httpd = SocketServer.TCPServer(("", PORT), Handler) -httpd.serve_forever() diff --git a/samples/html/subscriptionConfirmation.html b/samples/html/subscriptionConfirmation.html deleted file mode 100644 index f04ca77e..00000000 --- a/samples/html/subscriptionConfirmation.html +++ /dev/null @@ -1,148 +0,0 @@ - - - - - - - - -
[HTML markup for subscriptionConfirmation.html was lost when this copy of the patch was rendered; the deleted page showed a "Confirming subscription..." status while issuing the signed confirmation request]
diff --git a/samples/html/unsubscriptionConfirmation.html b/samples/html/unsubscriptionConfirmation.html deleted file mode 100644 index 320a95e7..00000000 --- a/samples/html/unsubscriptionConfirmation.html +++ /dev/null @@ -1,145 +0,0 @@
[HTML markup for unsubscriptionConfirmation.html was likewise lost; the deleted page showed a "Removing subscription..." status while issuing the signed unsubscribe request]
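Both confirmation pages drive the same v2 API call: an HTTP PUT to the pre-signed confirmation URL, with a body of {"confirmed": true} to confirm and {"confirmed": false} to unsubscribe. As a minimal sketch (not part of the retired tree), the request looks roughly as follows in Python; the endpoint semantics and header names are taken from _send_confirm_request() in samples/html/confirmation_web_service_sample.py above, while the signed dict and its lowercase keys are an assumption about how the pre-signed values get passed in:

import json
import uuid

import requests


def confirm_subscription(signed, confirmed=True):
    # 'signed' is assumed to hold the pre-signed URL values the page
    # received, keyed as in the sample service above: confirmation-url,
    # url-methods, url-signature, url-paths, url-expires, x-project-id.
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'X-Project-ID': signed['x-project-id'],
        'Client-ID': str(uuid.uuid4()),
        'URL-Methods': signed['url-methods'],
        'URL-Signature': signed['url-signature'],
        'URL-Paths': signed['url-paths'],
        'URL-Expires': signed['url-expires'],
    }
    # confirmed=False is the unsubscribe variant, matching the 'confirm'
    # header branch in the sample service.
    data = json.dumps({'confirmed': confirmed})
    return requests.put(signed['confirmation-url'], data=data,
                        headers=headers)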
diff --git a/samples/java-api-for-websocket/receive_message/JsonDecoder.java b/samples/java-api-for-websocket/receive_message/JsonDecoder.java deleted file mode 100755 index ced97f22..00000000 --- a/samples/java-api-for-websocket/receive_message/JsonDecoder.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.openstack.zaqar.sample; - -import java.io.StringReader; - -import javax.json.Json; -import javax.json.JsonObject; -import javax.websocket.Decoder; -import javax.websocket.EndpointConfig; - -public final class JsonDecoder implements Decoder.Text<JsonObject> { - - @Override - public JsonObject decode(final String s) { - return Json.createReader(new StringReader(s)).readObject(); - } - - @Override - public void destroy() { - } - - @Override - public void init(final EndpointConfig config) { - } - - @Override - public boolean willDecode(final String s) { - return true; - } - -} \ No newline at end of file diff --git a/samples/java-api-for-websocket/receive_message/SampleZaqarEndpoint.java b/samples/java-api-for-websocket/receive_message/SampleZaqarEndpoint.java deleted file mode 100755 index 3d178732..00000000 --- a/samples/java-api-for-websocket/receive_message/SampleZaqarEndpoint.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License.
- */ -import static java.lang.System.out; - -import java.io.IOException; - -import javax.json.JsonObject; -import javax.websocket.ClientEndpoint; -import javax.websocket.OnMessage; -import javax.websocket.OnOpen; -import javax.websocket.RemoteEndpoint; -import javax.websocket.Session; - -@ClientEndpoint(decoders = JsonDecoder.class) -public final class SampleZaqarEndpoint { - - @OnMessage - public void onMessage(final JsonObject msg) { - - if (msg.getJsonObject("body").getJsonArray("messages") != null) - out.println(msg.getJsonObject("body").getJsonArray("messages") - .getJsonObject(0).getString("body")); - - } - - @OnOpen - public void onOpen(final Session sess) throws IOException { - final RemoteEndpoint.Basic remote = sess.getBasicRemote(); - - final String authenticateMsg = "{\"action\":\"authenticate\"," - + "\"headers\":{\"X-Auth-Token\":" - + "\"8444886dd9b04a1b87ddb502b508261c\",\"X-Project-ID\":" - + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; // refer to bug - // #1553398 - - remote.sendText(authenticateMsg); - - final String claimCreateMsg = "{\"action\":\"claim_create\",\"body\":" - + "{\"queue_name\":\"SampleQueue\"},\"headers\":{\"Client-ID\":" - + "\"355186cd-d1e8-4108-a3ac-a2183697232a\",\"X-Project-ID\":" - + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; - - remote.sendText(claimCreateMsg); - } - -} diff --git a/samples/java-api-for-websocket/send_message/SampleZaqarEndpoint.java b/samples/java-api-for-websocket/send_message/SampleZaqarEndpoint.java deleted file mode 100755 index d9d608a8..00000000 --- a/samples/java-api-for-websocket/send_message/SampleZaqarEndpoint.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -import java.io.IOException; - -import javax.websocket.ClientEndpoint; -import javax.websocket.OnOpen; -import javax.websocket.RemoteEndpoint; -import javax.websocket.Session; - -@ClientEndpoint -public final class SampleZaqarEndpoint { - - @OnOpen - public void onOpen(final Session sess) throws IOException { - final RemoteEndpoint.Basic remote = sess.getBasicRemote(); - - final String authenticateMsg = "{\"action\":\"authenticate\"," - + "\"headers\":{\"X-Auth-Token\":" - + "\"8444886dd9b04a1b87ddb502b508261c\",\"X-Project-ID\":" - + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; // refer to bug - // #1553398 - - remote.sendText(authenticateMsg); - - final String messagePostMsg = "{\"action\":\"message_post\",\"body\":" - + "{\"messages\":[{\"body\":\"Zaqar Sample\"}],\"queue_name\":" - + "\"SampleQueue\"},\"headers\":{\"Client-ID\":" - + "\"355186cd-d1e8-4108-a3ac-a2183697232a\",\"X-Project-ID\":" - + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; - - remote.sendText(messagePostMsg); - } - -} diff --git a/samples/javascript/receive_message/zaqar_sample.js b/samples/javascript/receive_message/zaqar_sample.js deleted file mode 100755 index 3c41e205..00000000 --- a/samples/javascript/receive_message/zaqar_sample.js +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the 'License'); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -const ws = new WebSocket('ws://localhost:9000'); - -ws.onmessage = (e) => { - const msg = JSON.parse(e.data); - - if (msg.body.messages) - console.log(msg.body.messages[0].body); - -}; - -ws.onopen = () => { - ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ - "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ - "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 - - ws.send('{"action": "claim_create", "body": {"queue_name": "SampleQueue"}, \ - "headers": {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", \ - "X-Project-ID": "7530fad032ca431e9dc8ed4a5de5d99c"}}'); -}; diff --git a/samples/javascript/send_message/zaqar_sample.js b/samples/javascript/send_message/zaqar_sample.js deleted file mode 100755 index eee7850f..00000000 --- a/samples/javascript/send_message/zaqar_sample.js +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the 'License'); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -const ws = new WebSocket('ws://localhost:9000'); - -ws.onopen = () => { - ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ - "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ - "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 - - ws.send('{"action": "message_post", "body": {"messages": [{"body": \ - "Zaqar Sample"}], "queue_name": "SampleQueue"}, "headers": \ - {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", "X-Project-ID": \ - "7530fad032ca431e9dc8ed4a5de5d99c"}}'); -}; diff --git a/samples/javascript/websocket.html b/samples/javascript/websocket.html deleted file mode 100644 index 7216a257..00000000 --- a/samples/javascript/websocket.html +++ /dev/null @@ -1,309 +0,0 @@
[HTML markup for websocket.html was lost in rendering; the deleted "Zaqar WebSocket example" page was an interactive demo with connection controls, a Queues panel, a Messages panel (Age/Body/TTL table), and a Logs panel]
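The panels above correspond to websocket API actions defined by the request schemas later in this patch (queue_list, message_post, claim_create). As a rough sketch of what the Queues panel did, assuming the same ws://localhost:9000 endpoint and demo credentials as the JavaScript samples, and using the websocket-client library already listed in test-requirements.txt:

import json
import uuid

from websocket import create_connection  # websocket-client


ws = create_connection('ws://localhost:9000')

# Authenticate first, as every sample in this tree does
# (the samples reference bug #1553398 at this step).
ws.send(json.dumps({
    'action': 'authenticate',
    'headers': {'X-Auth-Token': '8444886dd9b04a1b87ddb502b508261c',
                'X-Project-ID': '7530fad032ca431e9dc8ed4a5de5d99c'}}))
print(ws.recv())

# List queues; Client-ID and X-Project-ID are required by the
# queue_list schema in zaqar/api/v1/request.py below.
ws.send(json.dumps({
    'action': 'queue_list',
    'headers': {'Client-ID': str(uuid.uuid4()),
                'X-Project-ID': '7530fad032ca431e9dc8ed4a5de5d99c'},
    'body': {'limit': 10}}))
print(ws.recv())

ws.close()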
diff --git a/samples/jaxrs/receive_message/SampleZaqarServlet.java b/samples/jaxrs/receive_message/SampleZaqarServlet.java deleted file mode 100755 index 844e2acf..00000000 --- a/samples/jaxrs/receive_message/SampleZaqarServlet.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -import java.io.IOException; - -import javax.servlet.annotation.WebServlet; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import javax.ws.rs.client.Client; -import javax.ws.rs.client.ClientBuilder; -import javax.ws.rs.client.Entity; -import javax.ws.rs.core.MultivaluedHashMap; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.core.Response; - -@SuppressWarnings("serial") -@WebServlet(name = "SampleServlet", value = "/") -public final class SampleZaqarServlet extends HttpServlet { - - @Override - protected void doGet(final HttpServletRequest req, - final HttpServletResponse resp) throws IOException { - final Client client = ClientBuilder.newClient(); - - final MultivaluedMap<String, Object> headers = - new MultivaluedHashMap<String, Object>(); - - headers.putSingle("Client-ID", "355186cd-d1e8-4108-a3ac-a2183697232a"); - - headers.putSingle("X-Auth-Token", "8444886dd9b04a1b87ddb502b508261c"); - - headers.putSingle("X-Project-Id", "7530fad032ca431e9dc8ed4a5de5d99c"); - - final Response res = client - .target("http://localhost:8888/v2/queues/SampleQueue/claims") - .request().headers(headers).post(Entity.json("")); - - resp.getWriter().println(res.readEntity(String.class)); - - client.close(); - } - -} diff --git a/samples/jaxrs/send_message/SampleZaqarServlet.java b/samples/jaxrs/send_message/SampleZaqarServlet.java deleted file mode 100755 index 64690f47..00000000 --- a/samples/jaxrs/send_message/SampleZaqarServlet.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License.
- */ -import javax.servlet.annotation.WebServlet; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import javax.ws.rs.client.Client; -import javax.ws.rs.client.ClientBuilder; -import javax.ws.rs.client.Entity; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedHashMap; -import javax.ws.rs.core.MultivaluedMap; - -@SuppressWarnings("serial") -@WebServlet(name = "SampleZaqarServlet", value = "/") -public final class SampleZaqarServlet extends HttpServlet { - - @Override - protected void doGet(final HttpServletRequest req, - final HttpServletResponse resp) { - final Client client = ClientBuilder.newClient(); - - final MultivaluedMap<String, Object> headers = - new MultivaluedHashMap<String, Object>(); - - headers.putSingle("Client-ID", "355186cd-d1e8-4108-a3ac-a2183697232a"); - - headers.putSingle("X-Auth-Token", "8444886dd9b04a1b87ddb502b508261c"); - - headers.putSingle("X-Project-Id", "7530fad032ca431e9dc8ed4a5de5d99c"); - - client.target("http://localhost:8888/v2/queues/SampleQueue/messages") - .request(MediaType.APPLICATION_JSON_TYPE).headers(headers) - .post(Entity - .json("{\"messages\":[{\"body\":\"Zaqar Sample\"}]}")); - - client.close(); - } - -} diff --git a/samples/nodejs/receive_message/zaqar_sample.js b/samples/nodejs/receive_message/zaqar_sample.js deleted file mode 100755 index 62526a23..00000000 --- a/samples/nodejs/receive_message/zaqar_sample.js +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the 'License'); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -const WebSocket = require('ws'); - -const ws = new WebSocket('ws://localhost:9000'); - -ws.on('message', (data, flags) => { - const msg = JSON.parse(data); - - if (msg.body.messages) - console.log(msg.body.messages[0].body); - -}); - -ws.on('open', () => { - ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ - "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ - "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 - - ws.send('{"action": "claim_create", "body": {"queue_name": "SampleQueue"}, \ - "headers": {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", \ - "X-Project-ID": "7530fad032ca431e9dc8ed4a5de5d99c"}}'); -}); diff --git a/samples/nodejs/send_message/zaqar_sample.js b/samples/nodejs/send_message/zaqar_sample.js deleted file mode 100755 index 45df9dbc..00000000 --- a/samples/nodejs/send_message/zaqar_sample.js +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the 'License'); you may not - * use this file except in compliance with the License. You may obtain a copy - * of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License.
- */ -const WebSocket = require('ws'); - -const ws = new WebSocket('ws://localhost:9000'); - -ws.on('open', () => { - ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ - "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ - "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 - - ws.send('{"action": "message_post", "body": {"messages": [{"body": \ - "Zaqar Sample"}], "queue_name": "SampleQueue"}, "headers": \ - {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", "X-Project-ID": \ - "7530fad032ca431e9dc8ed4a5de5d99c"}}'); -}); diff --git a/samples/python-zaqarclient/receive_message/zaqar_sample.py b/samples/python-zaqarclient/receive_message/zaqar_sample.py deleted file mode 100755 index 20faf08e..00000000 --- a/samples/python-zaqarclient/receive_message/zaqar_sample.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -from zaqarclient.queues.v1 import client - -client = client.Client('http://localhost:8888', conf={ - 'auth_opts': { - 'options': { - 'client_uuid': '355186cd-d1e8-4108-a3ac-a2183697232a', - 'os_auth_token': '8444886dd9b04a1b87ddb502b508261c', - 'os_auth_url': 'http://localhost:5000/v3.0/', - 'os_project_id': '7530fad032ca431e9dc8ed4a5de5d99c' - } - } -}, version=2) - -queue = client.queue('SampleQueue') - -claim = queue.claim(ttl=600, grace=600) # refer to bug #1553387 - -for msg in claim: - print(msg) diff --git a/samples/python-zaqarclient/send_message/zaqar_sample.py b/samples/python-zaqarclient/send_message/zaqar_sample.py deleted file mode 100755 index 5a406a47..00000000 --- a/samples/python-zaqarclient/send_message/zaqar_sample.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -from zaqarclient.queues.v1 import client - -client = client.Client('http://localhost:8888', conf={ - 'auth_opts': { - 'options': { - 'client_uuid': '355186cd-d1e8-4108-a3ac-a2183697232a', - 'os_auth_token': '8444886dd9b04a1b87ddb502b508261c', - 'os_auth_url': 'http://localhost:5000/v3.0/', - 'os_project_id': '7530fad032ca431e9dc8ed4a5de5d99c' - } - } -}, version=2) - -queue = client.queue('SampleQueue') - -queue.post([{'body': 'Zaqar Sample'}]) diff --git a/samples/zaqar/subscriber_service_sample.py b/samples/zaqar/subscriber_service_sample.py deleted file mode 100644 index 2c4d38dd..00000000 --- a/samples/zaqar/subscriber_service_sample.py +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -import json -import logging -from oslo_utils import uuidutils -import requests -import sys - -try: - import SimpleHTTPServer - import SocketServer -except Exception: - from http import server as SimpleHTTPServer - import socketserver as SocketServer - - -_AUTO_CONFIRM = False -for arg in sys.argv: - if arg == '--auto-confirm': - _AUTO_CONFIRM = True - sys.argv.remove(arg) - break - -if len(sys.argv) > 2: - PORT = int(sys.argv[2]) -elif len(sys.argv) > 1: - PORT = int(sys.argv[1]) -else: - PORT = 5678 - - -class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): - """This is the sample service for wsgi subscription. - - """ - - # TODO(wangxiyuan): support websocket. - def do_POST(self): - logging.warning('=================== POST =====================') - data_string = str( - self.rfile.read(int(self.headers['Content-Length']))) - self.data = json.loads(data_string) - if _AUTO_CONFIRM: - self._send_confirm_request() - message = 'OK' - self.send_response(200) - self.end_headers() - self.wfile.write(message) - logging.warning(self.headers) - logging.warning(self.data) - return - - def _send_confirm_request(self): - url = self.data['WSGISubscribeURL'] - headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json', - 'X-Project-ID': self.data['X-Project-ID'], - 'Client-ID': uuidutils.generate_uuid(), - 'URL-Methods': self.data['URL-Methods'], - 'URL-Signature': self.data['URL-Signature'], - 'URL-Paths': self.data['URL-Paths'], - 'URL-Expires': self.data['URL-Expires'], - } - data = {'confirmed': True} - requests.put(url=url, data=json.dumps(data), headers=headers) - -Handler = ServerHandler -httpd = SocketServer.TCPServer(("", PORT), Handler) -httpd.serve_forever() diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 6843e826..00000000 --- a/setup.cfg +++ /dev/null @@ -1,123 +0,0 @@ -[metadata] -name = zaqar -summary = OpenStack Queuing and Notification Service -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://docs.openstack.org/zaqar/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3.5 - -[files] -packages = - zaqar - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source -warning-is-error = 1 - -[entry_points] -console_scripts = - zaqar-bench = zaqar.bench.conductor:main - zaqar-server = zaqar.cmd.server:run - zaqar-gc = zaqar.cmd.gc:run - zaqar-sql-db-manage = zaqar.storage.sqlalchemy.migration.cli:main - -zaqar.data.storage = - mongodb = zaqar.storage.mongodb.driver:DataDriver - mongodb.fifo = zaqar.storage.mongodb.driver:FIFODataDriver - redis = zaqar.storage.redis.driver:DataDriver - swift = zaqar.storage.swift.driver:DataDriver - faulty = zaqar.tests.faulty_storage:DataDriver - -zaqar.control.storage = - sqlalchemy = 
zaqar.storage.sqlalchemy.driver:ControlDriver - mongodb = zaqar.storage.mongodb.driver:ControlDriver - redis = zaqar.storage.redis.driver:ControlDriver - faulty = zaqar.tests.faulty_storage:ControlDriver - -zaqar.transport = - wsgi = zaqar.transport.wsgi.driver:Driver - websocket = zaqar.transport.websocket.driver:Driver - -oslo.config.opts = - zaqar.common.auth = zaqar.common.auth:_config_options - zaqar.common.configs = zaqar.common.configs:_config_options - zaqar.storage.pipeline = zaqar.storage.pipeline:_config_options - zaqar.storage.pooling = zaqar.storage.pooling:_config_options - zaqar.storage.mongodb = zaqar.storage.mongodb.options:_config_options - zaqar.storage.redis = zaqar.storage.redis.options:_config_options - zaqar.storage.sqlalchemy = zaqar.storage.sqlalchemy.options:_config_options - zaqar.storage.swift = zaqar.storage.swift.options:_config_options - zaqar.transport.wsgi = zaqar.transport.wsgi.driver:_config_options - zaqar.transport.websocket = zaqar.transport.websocket.driver:_config_options - zaqar.transport.base = zaqar.transport.base:_config_options - zaqar.transport.validation = zaqar.transport.validation:_config_options - -zaqar.storage.stages = - zaqar.notification.notifier = zaqar.notification.notifier:NotifierDriver - -zaqar.storage.mongodb.driver.queue.stages = - message_queue_handler = zaqar.storage.mongodb.messages:MessageQueueHandler - -zaqar.storage.redis.driver.queue.stages = - message_queue_handler = zaqar.storage.redis.messages:MessageQueueHandler - -zaqar.storage.swift.driver.queue.stages = - message_queue_handler = zaqar.storage.swift.messages:MessageQueueHandler - -zaqar.notification.tasks = - http = zaqar.notification.tasks.webhook:WebhookTask - https = zaqar.notification.tasks.webhook:WebhookTask - mailto = zaqar.notification.tasks.mailto:MailtoTask - trust+http = zaqar.notification.tasks.trust:TrustTask - trust+https = zaqar.notification.tasks.trust:TrustTask - -tempest.test_plugins = - zaqar_tests = zaqar.tests.tempest_plugin.plugin:ZaqarTempestPlugin - -[nosetests] -where=zaqar/tests -verbosity=2 - -with-doctest = true - -cover-package = zaqar -cover-html = true -cover-erase = true -cover-inclusive = true - -; Disabled: Causes a bug in testtools to manifest. -; Trigger: self.assertX(condition), where condition == False. -; -; In "testtools/testresult/real.py" the traceback is set to -; None in _details_to_exc_info(), but the inspect_traceback() -; method in nose/inspector.py requires a traceback-like object. -; -; detailed-errors = 1 - - -[compile_catalog] -directory = zaqar/locale -domain = zaqar - -[update_catalog] -domain = zaqar -output_dir = zaqar/locale -input_file = zaqar/locale/zaqar.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = zaqar/locale/zaqar.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d8443..00000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index fedc080e..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,40 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -# Metrics and style -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 - -# Packaging -mock>=2.0 # BSD - -# Backends -redis>=2.10.0 # MIT -pymongo!=3.1,>=3.0.2 # Apache-2.0 -python-swiftclient>=3.2.0 # Apache-2.0 -websocket-client>=0.32.0 # LGPLv2+ -PyMySQL>=0.7.6 # MIT License - -# Unit testing -coverage!=4.4,>=4.0 # Apache-2.0 -ddt>=1.0.1 # MIT -fixtures>=3.0.0 # Apache-2.0/BSD -python-subunit>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testrepository>=0.0.18 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -oslo.db>=4.24.0 # Apache-2.0 -testresources>=0.2.4 # Apache-2.0/BSD -os-testr>=0.8.0 # Apache-2.0 - -# Documentation -sphinx>=1.6.2 # BSD -openstackdocstheme>=1.11.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 -reno!=2.3.1,>=1.8.0 # Apache-2.0 -os-api-ref>=1.0.0 # Apache-2.0 - -# Tempest -tempest>=16.1.0 # Apache-2.0 - -#OSprofiler -osprofiler>=1.4.0 # Apache-2.0 diff --git a/tools/doc/find_autodoc_modules.sh b/tools/doc/find_autodoc_modules.sh deleted file mode 100755 index 4986ba0a..00000000 --- a/tools/doc/find_autodoc_modules.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -ZAQAR_DIR='../../zaqar/' # include trailing slash -DOCS_DIR='source' - -modules='' -for x in `find ${ZAQAR_DIR} -name '*.py' | grep -v zaqar/tests | grep -v zaqar/bench`; do - if [ `basename ${x} .py` == "__init__" ] ; then - continue - fi - relative=zaqar.`echo ${x} | sed -e 's$^'${ZAQAR_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'` - modules="${modules} ${relative}" -done - -for mod in ${modules} ; do - if [ ! -f "${DOCS_DIR}/${mod}.rst" ]; - then - echo ${mod} - fi -done \ No newline at end of file diff --git a/tools/doc/generate_autodoc_index.sh b/tools/doc/generate_autodoc_index.sh deleted file mode 100755 index 62e50ba1..00000000 --- a/tools/doc/generate_autodoc_index.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh - -SOURCEDIR=../../doc/source/api - -if [ ! -d ${SOURCEDIR} ] ; then - mkdir -p ${SOURCEDIR} -fi - -for x in `./find_autodoc_modules.sh`; -do - echo "Generating ${SOURCEDIR}/${x}.rst" - echo "${SOURCEDIR}/${x}.rst" >> .autogenerated - heading="The :mod:\`${x}\` module" - # Figure out how long the heading is - # and make sure to emit that many '=' under - # it to avoid heading format errors - # in Sphinx. - heading_len=$(echo "$heading" | wc -c) - underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') - ( cat < ${SOURCEDIR}/${x}.rst - -done - -if [ ! 
-f ${SOURCEDIR}/autoindex.rst ] ; then - - cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst - done - - echo ${SOURCEDIR}/autoindex.rst >> .autogenerated -fi \ No newline at end of file diff --git a/tools/test-setup.sh b/tools/test-setup.sh deleted file mode 100755 index 2bf33a74..00000000 --- a/tools/test-setup.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -xe - -# This script will be run by OpenStack CI before unit tests are run, -# it sets up the test system as needed. -# Developers should setup their test systems in a similar way. - -# This setup needs to be run as a user that can run sudo. - -# The root password for the MySQL database; pass it in via -# MYSQL_ROOT_PW. -DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} - -# This user and its password are used by the tests, if you change it, -# your tests might fail. -DB_USER=openstack_citest -DB_PW=openstack_citest - -sudo -H mysqladmin -u root password $DB_ROOT_PW - -# It's best practice to remove anonymous users from the database. If -# a anonymous user exists, then it matches first for connections and -# other connections from that host will not work. -sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " - DELETE FROM mysql.user WHERE User=''; - FLUSH PRIVILEGES; - GRANT ALL PRIVILEGES ON *.* - TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" - -# Now create our database. -mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " - SET default_storage_engine=MYISAM; - DROP DATABASE IF EXISTS openstack_citest; - CREATE DATABASE openstack_citest CHARACTER SET utf8;" \ No newline at end of file diff --git a/tox.ini b/tox.ini deleted file mode 100644 index d4267771..00000000 --- a/tox.ini +++ /dev/null @@ -1,79 +0,0 @@ -[tox] -minversion = 1.6 -envlist = py35,py27,pypy,pep8 -skipsdist = True - -[testenv] -usedevelop = True -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} -setenv = VIRTUAL_ENV={envdir} - ZAQAR_TESTS_CONFIGS_DIR={toxinidir}/zaqar/tests/etc/ - ZAQAR_TEST_MONGODB=1 - ZAQAR_TEST_SLOW=1 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - find . -type f -name "*.pyc" -delete - ostestr --concurrency 1 {posargs} -whitelist_externals = find - -[testenv:pypy] -setenv = {[testenv]setenv} - JIT_FLAG=--jit off - -[testenv:integration] -setenv = {[testenv]setenv} - ZAQAR_TEST_INTEGRATION=1 - OS_TEST_PATH=./zaqar/tests/functional -commands = ostestr --concurrency 1 {posargs} - -[testenv:pep8] -commands = flake8 - -[testenv:genconfig] -commands = - oslo-config-generator --config-file etc/oslo-config-generator/zaqar.conf - -[testenv:cover] -commands = - python setup.py testr --coverage \ - --testr-args='^(?!.*test.*coverage).*$' - coverage report - -[testenv:venv] -commands = {posargs} - -[testenv:docs] -commands = - python setup.py build_sphinx - sphinx-build -W -b html api-ref/source api-ref/build/html - -[testenv:api-ref] -# This environment is called from CI scripts to test and publish -# the API Ref to developer.openstack.org. 
-# -whitelist_externals = bash - rm -commands = - rm -rf api-ref/build - sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html - -[testenv:debug] -commands = oslo_debug_helper {posargs} - -[testenv:releasenotes] -commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[flake8] -exclude = .venv*,.git,.tox,dist,doc,*lib/python*,*.egg,.update-venv -# NOTE(flaper87): Our currently max-complexity is 15. Not sure what the ideal complexity -# for Zaqar should be but lets keep it to the minimum possible. -max-complexity = 16 -# [H904] Delay string interpolations at logging calls. -enable-extensions=H904 - -[hacking] -local-check-factory = zaqar.hacking.checks.factory - -[testenv:install-guide] -commands = sphinx-build -a -E -W -d install-guide/build/doctrees -b html install-guide/source install-guide/build/html diff --git a/zaqar/__init__.py b/zaqar/__init__.py deleted file mode 100644 index 4819a025..00000000 --- a/zaqar/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import zaqar.bootstrap -import zaqar.version - -Bootstrap = zaqar.bootstrap.Bootstrap - - -__version__ = zaqar.version.version_info.cached_version_string() diff --git a/zaqar/api/__init__.py b/zaqar/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/api/handler.py b/zaqar/api/handler.py deleted file mode 100644 index 0b021447..00000000 --- a/zaqar/api/handler.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
- -from zaqar.api.v2 import endpoints -from zaqar.api.v2 import request as schema_validator - -from zaqar.common.api import request -from zaqar.common.api import response -from zaqar.common import consts -from zaqar.common import errors -from zaqar.common import urls - - -class Handler(object): - """Defines API handler - - The handler validates and process the requests - """ - - _actions_mapping = { - consts.MESSAGE_LIST: 'GET', - consts.MESSAGE_GET: 'GET', - consts.MESSAGE_GET_MANY: 'GET', - consts.MESSAGE_POST: 'POST', - consts.MESSAGE_DELETE: 'DELETE', - consts.MESSAGE_DELETE_MANY: 'DELETE' - } - - def __init__(self, storage, control, validate, defaults): - self.v2_endpoints = endpoints.Endpoints(storage, control, - validate, defaults) - self._subscription_factory = None - - def set_subscription_factory(self, factory): - self._subscription_factory = factory - - def clean_subscriptions(self, subscriptions): - for resp in subscriptions: - body = {'queue_name': resp._request._body.get('queue_name'), - 'subscription_id': resp._body.get('subscription_id')} - payload = {'body': body, 'headers': resp._request._headers} - req = self.create_request(payload) - self.v2_endpoints.subscription_delete(req) - - def process_request(self, req, protocol): - # FIXME(vkmc): Control API version - if req._action == consts.SUBSCRIPTION_CREATE: - subscriber = req._body.get('subscriber') - if not subscriber: - # Default to the connected websocket as subscriber - subscriber = self._subscription_factory.get_subscriber( - protocol) - return self.v2_endpoints.subscription_create(req, subscriber) - - return getattr(self.v2_endpoints, req._action)(req) - - @staticmethod - def validate_request(payload, req): - """Validate a request and its payload against a schema. - - :return: a Response object if validation failed, None otherwise. 
- """ - try: - action = payload.get('action') - validator = schema_validator.RequestSchema() - is_valid = validator.validate(action=action, body=payload) - except errors.InvalidAction as ex: - body = {'error': str(ex)} - headers = {'status': 400} - return response.Response(req, body, headers) - else: - if not is_valid: - body = {'error': 'Schema validation failed.'} - headers = {'status': 400} - return response.Response(req, body, headers) - - def create_response(self, code, body, req=None): - if req is None: - req = self.create_request() - headers = {'status': code} - return response.Response(req, body, headers) - - @staticmethod - def create_request(payload=None, env=None): - if payload is None: - payload = {} - action = payload.get('action') - body = payload.get('body', {}) - headers = payload.get('headers') - - return request.Request(action=action, body=body, - headers=headers, api="v2", env=env) - - def get_defaults(self): - return self.v2_endpoints._defaults - - def verify_signature(self, key, payload): - action = payload.get('action') - method = self._actions_mapping.get(action) - - headers = payload.get('headers', {}) - project = headers.get('X-Project-ID') - expires = headers.get('URL-Expires') - methods = headers.get('URL-Methods') - paths = headers.get('URL-Paths') - signature = headers.get('URL-Signature') - - if not method or method not in methods: - return False - - try: - verified = urls.verify_signed_headers_data(key, paths, - project=project, - methods=methods, - expires=expires, - signature=signature) - except ValueError: - return False - - return verified diff --git a/zaqar/api/v1/__init__.py b/zaqar/api/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/api/v1/request.py b/zaqar/api/v1/request.py deleted file mode 100644 index 0c8d3555..00000000 --- a/zaqar/api/v1/request.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from zaqar.common.api import api -from zaqar.common import consts - - -class RequestSchema(api.Api): - - headers = { - 'User-Agent': {'type': 'string'}, - 'Date': {'type': 'string'}, - 'Accept': {'type': 'string'}, - 'Client-ID': {'type': 'string'}, - 'X-Project-ID': {'type': 'string'}, - 'X-Auth-Token': {'type': 'string'} - } - - schema = { - - # Base - 'get_home_doc': { - 'properties': { - 'action': {'enum': ['get_home_doc']}, - 'headers': { - 'type': 'object', - 'properties': headers, - } - }, - 'required': ['action', 'headers'], - 'admin': True, - }, - - 'check_node_health': { - 'properties': { - 'action': {'enum': ['check_node_health']}, - 'headers': { - 'type': 'object', - 'properties': headers, - } - }, - 'required': ['action', 'headers'], - 'admin': True, - }, - - 'ping_node': { - 'properties': { - 'action': {'enum': ['ping_node']}, - 'headers': { - 'type': 'object', - 'properties': headers, - } - }, - 'required': ['action', 'headers'], - 'admin': True, - }, - 'authenticate': { - 'properties': { - 'action': {'enum': ['authenticate']}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['X-Project-ID', 'X-Auth-Token'] - } - }, - 'required': ['action', 'headers'], - }, - - # Queues - consts.QUEUE_LIST: { - 'properties': { - 'action': {'enum': [consts.QUEUE_LIST]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'marker': {'type': 'string'}, - 'limit': {'type': 'integer'}, - 'detailed': {'type': 'boolean'} - } - } - }, - 'required': ['action', 'headers'] - }, - - consts.QUEUE_CREATE: { - 'properties': { - 'action': {'enum': [consts.QUEUE_CREATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID']}, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.QUEUE_DELETE: { - 'properties': { - 'action': {'enum': [consts.QUEUE_DELETE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - }, - 'required': ['queue_name'] - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.QUEUE_GET: { - 'properties': { - 'action': {'enum': [consts.QUEUE_GET]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.QUEUE_GET_STATS: { - 'properties': { - 'action': {'enum': [consts.QUEUE_GET_STATS]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True - }, - - # Messages - consts.MESSAGE_LIST: { - 'properties': { - 'action': {'enum': [consts.MESSAGE_LIST]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'marker': {'type': 'string'}, - 'limit': {'type': 'integer'}, - 'echo': {'type': 'boolean'}, - 
'include_claimed': {'type': 'boolean'}, - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.MESSAGE_GET: { - 'properties': { - 'action': {'enum': [consts.MESSAGE_GET]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'message_id': {'type': 'string'}, - }, - 'required': ['queue_name', 'message_id'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.MESSAGE_GET_MANY: { - 'properties': { - 'action': {'enum': [consts.MESSAGE_GET_MANY]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'message_ids': {'type': 'array'}, - }, - 'required': ['queue_name', 'message_ids'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.MESSAGE_POST: { - 'properties': { - 'action': {'enum': [consts.MESSAGE_POST]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'messages': {'type': 'array'}, - }, - 'required': ['queue_name', 'messages'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.MESSAGE_DELETE: { - 'properties': { - 'action': {'enum': [consts.MESSAGE_DELETE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'message_id': {'type': 'string'}, - 'claim_id': {'type': 'string'} - }, - 'required': ['queue_name', 'message_id'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.MESSAGE_DELETE_MANY: { - 'properties': { - 'action': {'enum': [consts.MESSAGE_DELETE_MANY]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'message_ids': {'type': 'array'}, - 'pop': {'type': 'integer'} - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - # Claims - consts.CLAIM_CREATE: { - 'properties': { - 'action': {'enum': [consts.CLAIM_CREATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'limit': {'type': 'integer'}, - 'ttl': {'type': 'integer'}, - 'grace': {'type': 'integer'} - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.CLAIM_GET: { - 'properties': { - 'action': {'enum': [consts.CLAIM_GET]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'claim_id': {'type': 'string'} - }, - 'required': ['queue_name', 'claim_id'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.CLAIM_UPDATE: { - 'properties': { - 'action': {'enum': [consts.CLAIM_UPDATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'claim_id': {'type': 
'string'}, - 'ttl': {'type': 'integer'} - }, - 'required': ['queue_name', 'claim_id'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.CLAIM_DELETE: { - 'properties': { - 'action': {'enum': [consts.CLAIM_DELETE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'claim_id': {'type': 'string'} - }, - 'required': ['queue_name', 'claim_id'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - } diff --git a/zaqar/api/v1/response.py b/zaqar/api/v1/response.py deleted file mode 100644 index 6b0fe19d..00000000 --- a/zaqar/api/v1/response.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from zaqar.common.api import api -from zaqar.common import consts - - -class ResponseSchema(api.Api): - - """Define validation schema for json response.""" - - def __init__(self, limits): - self.limits = limits - - age = { - "type": "number", - "minimum": 0 - } - - message = { - "type": "object", - "properties": { - "href": { - "type": "string", - "pattern": "^(/v1/queues/[a-zA-Z0-9_-]" - "{1,64}/messages/[a-zA-Z0-9_-]+)$" - }, - "age": age, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body"], - "additionalProperties": False, - } - - claim_href = { - "type": "string", - "pattern": "^(/v1/queues/[a-zA-Z0-9_-]{1,64}" - "/messages/[a-zA-Z0-9_-]+)" - "\?claim_id=[a-zA-Z0-9_-]+$" - } - - self.schema = { - consts.QUEUE_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string', - 'enum': ['next'], - }, - 'href': { - 'type': 'string', - "pattern": "^/v1/queues\?", - } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False, - }, - 'minItems': 1, - 'maxItems': 1, - }, - 'queues': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'pattern': '^[a-zA-Z0-9_-]{1,64}$' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1/queues/' - '[a-zA-Z0-9_-]{1,64}$', - }, - 'metadata': { - 'type': 'object', - } - }, - 'required': ['name', 'href'], - 'additionalProperties': False, - }, - 'minItems': 1, - 'maxItems': self.limits.max_queues_per_page, - } - }, - 'required': ['links', 'queues'], - 'additionalProperties': False, - }, - consts.QUEUE_GET_STATS: { - 'type': 'object', - 'properties': { - 'messages': { - 'type': 'object', - 'properties': { - 'free': { - 'type': 'number', - 'minimum': 0 - }, - 'claimed': { - 'type': 'number', - 'minimum': 0 - }, - 'total': { - 'type': 'number', - 'minimum': 0 - }, - 'oldest': { - 'type': 'object' - }, - 'newest': { - 'type': 'object' - } - - }, - 'required': ['free', 'claimed', 'total'], - 'additionalProperties': False - } - }, - 'required': ['messages'], - 
'additionalProperties': False - }, - - consts.POOL_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1/pools\?' - } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'pools': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'pattern': '^/v1/' - 'pools/[a-zA-Z0-9_-]{1,64}$' - }, - 'weight': { - 'type': 'number', - 'minimum': -1 - }, - 'name': { - 'type': 'string' - }, - 'uri': { - 'type': 'string' - }, - 'options': { - 'type': 'object', - 'additionalProperties': True - } - }, - 'required': ['href', 'weight', 'uri'], - 'additionalProperties': False, - }, - } - }, - 'required': ['links', 'pools'], - 'additionalProperties': False - }, - - consts.MESSAGE_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1/queues/[a-zA-Z0-9_-]+' - '/messages\?(.)*$' - } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'messages': { - "type": "array", - "items": message, - "minItems": 1, - "maxItems": self.limits.max_messages_per_claim_or_pop - } - } - }, - consts.MESSAGE_GET_MANY: { - "type": "array", - "items": message, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - }, - - consts.CLAIM_CREATE: { - "type": "array", - "items": { - "type": "object", - "properties": { - "href": claim_href, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - "age": age, - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body"], - "additionalProperties": False, - }, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - }, - - consts.CLAIM_GET: { - 'type': 'object', - 'properties': { - 'age': age, - 'ttl': { - 'type': 'number', - 'minimum': 0, - 'maximum': self.limits.max_claim_ttl - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1/queues/[a-zA-Z0-9_-]+' - '/claims/[a-zA-Z0-9_-]+$' - }, - 'messages': { - "type": "array", - "items": { - "type": "object", - "properties": { - "href": claim_href, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - "age": age, - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body"], - "additionalProperties": False, - }, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - } - }, - 'required': ['age', 'ttl', 'messages', 'href'], - 'additionalProperties': False - } - } diff --git a/zaqar/api/v1_1/__init__.py b/zaqar/api/v1_1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/api/v1_1/request.py b/zaqar/api/v1_1/request.py deleted file mode 100644 index eacb19b0..00000000 --- a/zaqar/api/v1_1/request.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from zaqar.api.v1 import request as v1 -from zaqar.common import consts - - -class RequestSchema(v1.RequestSchema): - - headers = v1.RequestSchema.headers - schema = v1.RequestSchema.schema - - schema.update({ - - # Pools - consts.POOL_LIST: { - 'properties': { - 'action': {'enum': [consts.POOL_LIST]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'pool_name': {'type': 'string'}, - 'limit': {'type': 'integer'}, - 'marker': {'type': 'string'} - }, - 'required': ['pool_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.POOL_CREATE: { - 'properties': { - 'action': {'enum': [consts.POOL_CREATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'pool_name': {'type': 'string'}, - 'weight': {'type': 'integer'}, - 'uri': {'type': 'string'}, - 'options': {'type': 'object'}, - }, - 'required': ['pool_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.POOL_UPDATE: { - 'properties': { - 'action': {'enum': [consts.POOL_UPDATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'pool_name': {'type': 'string'}, - 'weight': {'type': 'integer'}, - 'uri': {'type': 'string'}, - 'options': {'type': 'object'}, - }, - 'required': ['pool_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.POOL_GET: { - 'properties': { - 'action': {'enum': [consts.POOL_GET]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'pool_name': {'type': 'string'}, - 'detailed': {'type': 'boolean'} - }, - 'required': ['pool_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.POOL_DELETE: { - 'properties': { - 'action': {'enum': [consts.POOL_DELETE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'pool_name': {'type': 'string'} - }, - 'required': ['pool_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - # Flavors - consts.FLAVOR_LIST: { - 'properties': { - 'action': {'enum': [consts.FLAVOR_LIST]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'flavor_name': {'type': 'string'}, - 'limit': {'type': 'integer'}, - 'marker': {'type': 'string'} - }, - 'required': ['flavor_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.FLAVOR_CREATE: { - 'properties': { - 'action': {'enum': [consts.FLAVOR_CREATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'flavor_name': {'type': 'string'}, - 'pool_name': {'type': 'string'}, - 'capabilities': {'type': 'object'}, - }, - 'required': ['flavor_name', 'pool_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.FLAVOR_UPDATE: { - 
'properties': { - 'action': {'enum': [consts.FLAVOR_UPDATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'flavor_name': {'type': 'string'}, - 'pool_name': {'type': 'string'}, - 'capabilities': {'type': 'object'}, - }, - 'required': ['flavor_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.FLAVOR_GET: { - 'properties': { - 'action': {'enum': [consts.FLAVOR_GET]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'flavor_name': {'type': 'string'}, - 'detailed': {'type': 'boolean'} - }, - 'required': ['flavor_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - - consts.FLAVOR_DELETE: { - 'properties': { - 'action': {'enum': [consts.FLAVOR_DELETE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'flavor_name': {'type': 'string'} - }, - 'required': ['flavor_name'], - } - }, - 'required': ['action', 'headers', 'body'], - 'admin': True, - }, - }) diff --git a/zaqar/api/v1_1/response.py b/zaqar/api/v1_1/response.py deleted file mode 100644 index 4964ee07..00000000 --- a/zaqar/api/v1_1/response.py +++ /dev/null @@ -1,411 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
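Both request schemas in this hunk follow the same convention: plain JSON Schema fragments keyed by action constant, with control-plane actions (pools, flavors) additionally flagged 'admin': True. A hedged sketch of how one such entry can be exercised with the jsonschema library — the action string and payload are illustrative stand-ins, since the real values live in zaqar.common.consts:

    # Minimal sketch: validating a hypothetical pool-listing request against
    # the schema shape shown above. Requires the jsonschema package.
    import uuid

    import jsonschema

    pool_list_schema = {
        'properties': {
            'action': {'enum': ['pool_list']},   # stand-in for consts.POOL_LIST
            'headers': {
                'type': 'object',
                'required': ['Client-ID', 'X-Project-ID'],
            },
            'body': {
                'type': 'object',
                'properties': {
                    'pool_name': {'type': 'string'},
                    'limit': {'type': 'integer'},
                    'marker': {'type': 'string'},
                },
                'required': ['pool_name'],
            },
        },
        'required': ['action', 'headers', 'body'],
    }

    request = {
        'action': 'pool_list',
        'headers': {'Client-ID': str(uuid.uuid4()), 'X-Project-ID': 'demo'},
        'body': {'pool_name': 'pool-1', 'limit': 10},
    }

    jsonschema.validate(request, pool_list_schema)  # raises ValidationError if malformed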
- -from zaqar.common.api import api -from zaqar.common import consts - - -class ResponseSchema(api.Api): - - """Define validation schema for json response.""" - - def __init__(self, limits): - self.limits = limits - - age = { - "type": "number", - "minimum": 0 - } - - message = { - "type": "object", - "properties": { - "id": { - "type": "string", - }, - "href": { - "type": "string", - "pattern": "^(/v1\.1/queues/[a-zA-Z0-9_-]{1,64}" - "/messages/[a-zA-Z0-9_-]+)(\?claim_id=[a-zA-Z0-9_-]+)?$" - }, - "age": age, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body", "id"], - "additionalProperties": False, - } - - claim_href = { - "type": "string", - "pattern": "^(/v1\.1/queues/[a-zA-Z0-9_-]{1,64}" - "/messages/[a-zA-Z0-9_-]+)" - "\?claim_id=[a-zA-Z0-9_-]+$" - } - - flavor = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/flavors/[a-zA-Z0-9_-]{1,64}$' - }, - 'pool': { - 'type': 'string', - }, - 'project': { - 'type': 'string' - }, - 'capabilities': { - 'type': 'object', - 'additionalProperties': True - } - }, - 'required': ['href', 'pool', 'project'], - 'additionalProperties': False, - } - - self.schema = { - consts.MESSAGE_GET_MANY: { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": message, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - } - }, - 'required': ['messages'], - 'additionalProperties': False, - }, - - consts.QUEUE_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string', - 'enum': ['next'], - }, - 'href': { - 'type': 'string', - "pattern": "^/v1\.1/queues\?", - } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False, - }, - 'minItems': 1, - 'maxItems': 1, - }, - - 'queues': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'pattern': '^[a-zA-Z0-9_-]{1,64}$' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/queues/' - '[a-zA-Z0-9_-]{1,64}$', - }, - 'metadata': { - 'type': 'object', - } - }, - 'required': ['name', 'href'], - 'additionalProperties': False, - }, - 'minItems': 1, - 'maxItems': self.limits.max_queues_per_page, - } - }, - 'required': ['links', 'queues'], - 'additionalProperties': False, - }, - - consts.QUEUE_GET_STATS: { - 'type': 'object', - 'properties': { - 'messages': { - 'type': 'object', - 'properties': { - 'free': { - 'type': 'number', - 'minimum': 0 - }, - 'claimed': { - 'type': 'number', - 'minimum': 0 - }, - 'total': { - 'type': 'number', - 'minimum': 0 - }, - 'oldest': { - 'type': 'object' - }, - 'newest': { - 'type': 'object' - } - - }, - 'required': ['free', 'claimed', 'total'], - 'additionalProperties': False - } - }, - 'required': ['messages'], - 'additionalProperties': False - }, - - consts.POOL_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/pools\?' 
- } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'pools': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/' - 'pools/[a-zA-Z0-9_-]{1,64}$' - }, - 'weight': { - 'type': 'number', - 'minimum': -1 - }, - 'name': { - 'type': 'string' - }, - 'uri': { - 'type': 'string' - }, - 'group': { - 'type': ['string', 'null'] - }, - 'options': { - 'type': 'object', - 'additionalProperties': True - } - }, - 'required': ['href', 'weight', 'uri', 'group'], - 'additionalProperties': False, - }, - } - }, - 'required': ['links', 'pools'], - 'additionalProperties': False - }, - - consts.MESSAGE_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/queues/[a-zA-Z0-9_-]+' - '/messages\?(.)*$' - } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'messages': { - "type": "array", - "items": message, - "minItems": 0, - "maxItems": self.limits.max_messages_per_claim_or_pop - } - } - }, - consts.POOL_GET_DETAIL: { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string' - }, - 'uri': { - 'type': 'string' - }, - 'group': { - 'type': ['string', 'null'] - }, - 'weight': { - 'type': 'number', - 'minimum': -1 - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/pools/' - '[a-zA-Z0-9_\-]+$' - }, - 'options': { - 'type': 'object', - 'additionalProperties': True - } - }, - 'required': ['uri', 'weight', 'href'], - 'additionalProperties': False - }, - - consts.CLAIM_CREATE: { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - }, - "href": claim_href, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - "age": age, - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body", "id"], - "additionalProperties": False, - }, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - } - }, - 'required': ['messages'], - 'additionalProperties': False - }, - - consts.CLAIM_GET: { - 'type': 'object', - 'properties': { - 'age': age, - 'ttl': { - 'type': 'number', - 'minimum': 0, - 'maximum': self.limits.max_claim_ttl - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/queues/[a-zA-Z0-9_-]+' - '/claims/[a-zA-Z0-9_-]+$' - }, - 'messages': { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - }, - "href": claim_href, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - "age": age, - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body", "id"], - "additionalProperties": False, - }, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - } - }, - 'required': ['age', 'ttl', 'messages', 'href'], - 'additionalProperties': False - }, - - consts.FLAVOR_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v1\.1/flavors\?' 
- } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'flavors': { - 'type': 'array', - 'items': flavor, - } - }, - 'required': ['links', 'flavors'], - 'additionalProperties': False - } - - } diff --git a/zaqar/api/v2/__init__.py b/zaqar/api/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/api/v2/endpoints.py b/zaqar/api/v2/endpoints.py deleted file mode 100644 index 20c1fb83..00000000 --- a/zaqar/api/v2/endpoints.py +++ /dev/null @@ -1,969 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -from stevedore import driver - -from oslo_log import log as logging -from oslo_utils import netutils - -from zaqar.common.api import errors as api_errors -from zaqar.common.api import response -from zaqar.common.api import utils as api_utils -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import validation - -LOG = logging.getLogger(__name__) - - -class Endpoints(object): - """v2 API Endpoints.""" - - def __init__(self, storage, control, validate, defaults): - self._queue_controller = storage.queue_controller - self._message_controller = storage.message_controller - self._claim_controller = storage.claim_controller - self._subscription_controller = storage.subscription_controller - - self._pools_controller = control.pools_controller - self._flavors_controller = control.flavors_controller - - self._validate = validate - - self._defaults = defaults - self._subscription_url = None - - # Queues - @api_utils.on_exception_sends_500 - def queue_list(self, req): - """Gets a list of queues - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - - LOG.debug(u'Queue list - project: %(project)s', - {'project': project_id}) - - try: - kwargs = api_utils.get_headers(req) - - self._validate.queue_listing(**kwargs) - results = self._queue_controller.list( - project=project_id, **kwargs) - # Buffer list of queues. Can raise NoPoolFound error. - queues = list(next(results)) - except (ValueError, validation.ValidationFailed) as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - error = 'Queues could not be listed.' - headers = {'status': 503} - return api_utils.error_response(req, ex, headers, error) - - # Got some. Prepare the response. - body = {'queues': queues} - headers = {'status': 200} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def queue_create(self, req): - """Creates a queue - - :param req: Request instance ready to be sent. 
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - metadata = req._body.get('metadata', {}) - - LOG.debug(u'Queue create - queue: %(queue)s, project: %(project)s', - {'queue': queue_name, - 'project': project_id}) - - try: - self._validate.queue_identification(queue_name, project_id) - self._validate.queue_metadata_length(len(str(metadata))) - self._validate.queue_metadata_putting(metadata) - created = self._queue_controller.create(queue_name, - metadata=metadata, - project=project_id) - except validation.ValidationFailed as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - error = _('Queue %s could not be created.') % queue_name - headers = {'status': 503} - return api_utils.error_response(req, ex, headers, error) - else: - body = _('Queue %s created.') % queue_name - headers = {'status': 201} if created else {'status': 204} - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def queue_delete(self, req): - """Deletes a queue - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - - LOG.debug(u'Queue delete - queue: %(queue)s, project: %(project)s', - {'queue': queue_name, 'project': project_id}) - try: - self._queue_controller.delete(queue_name, project=project_id) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - error = _('Queue %s could not be deleted.') % queue_name - headers = {'status': 503} - return api_utils.error_response(req, ex, headers, error) - else: - body = _('Queue %s removed.') % queue_name - headers = {'status': 204} - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def queue_get(self, req): - """Gets a queue - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - - LOG.debug(u'Queue get - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - try: - resp_dict = self._queue_controller.get(queue_name, - project=project_id) - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - error = _('Queue %s does not exist.') % queue_name - headers = {'status': 404} - return api_utils.error_response(req, ex, headers, error) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - headers = {'status': 503} - error = _('Cannot retrieve queue %s.') % queue_name - return api_utils.error_response(req, ex, headers, error) - else: - body = resp_dict - headers = {'status': 200} - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def queue_get_stats(self, req): - """Gets queue stats - - :param req: Request instance ready to be sent. 
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - - LOG.debug(u'Get queue stats - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - try: - resp_dict = self._queue_controller.stats(queue_name, - project=project_id) - body = resp_dict - except storage_errors.QueueDoesNotExist as ex: - LOG.exception(ex) - resp_dict = { - 'messages': { - 'claimed': 0, - 'free': 0, - 'total': 0 - } - } - body = resp_dict - headers = {'status': 404} - return response.Response(req, body, headers) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - error = _('Cannot retrieve queue %s stats.') % queue_name - headers = {'status': 503} - return api_utils.error_response(req, ex, headers, error) - else: - headers = {'status': 200} - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def queue_purge(self, req): - """Purge queue - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - resource_types = req._body.get('resource_types', ["messages", - "subscriptions"]) - - LOG.debug(u'Purge queue - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - try: - pop_limit = 100 - if "messages" in resource_types: - LOG.debug("Purge all messages under queue %s", queue_name) - resp = self._pop_messages(req, queue_name, - project_id, pop_limit) - while resp.get_response()['body']['messages']: - resp = self._pop_messages(req, queue_name, - project_id, pop_limit) - - if "subscriptions" in resource_types: - LOG.debug("Purge all subscriptions under queue %s", - queue_name) - resp = self._subscription_controller.list(queue_name, - project=project_id) - subscriptions = list(next(resp)) - for sub in subscriptions: - self._subscription_controller.delete(queue_name, - sub['id'], - project=project_id) - - except storage_errors.QueueDoesNotExist as ex: - LOG.exception(ex) - headers = {'status': 404} - return api_utils.error_response(req, ex, headers) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - headers = {'status': 503} - return api_utils.error_response(req, ex, headers) - else: - headers = {'status': 204} - return response.Response(req, {}, headers) - - # Messages - @api_utils.on_exception_sends_500 - def message_list(self, req): - """Gets a list of messages on a queue - - :param req: Request instance ready to be sent. 
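queue_purge above drains a queue by popping messages in fixed batches of 100 until the backend returns an empty page, then deletes subscriptions one by one. The message-draining loop, reduced to a self-contained sketch (pop is a stand-in for the storage controller's pop call):

    def purge_messages(pop, batch_size=100):
        """Pop batches until the queue reports no more messages."""
        while True:
            batch = pop(limit=batch_size)
            if not batch:
                break

    # Example with an in-memory backlog standing in for the controller:
    backlog = list(range(250))
    purge_messages(
        lambda limit: [backlog.pop() for _ in range(min(limit, len(backlog)))])
    assert not backlog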
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - - LOG.debug(u'Message list - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - try: - kwargs = api_utils.get_headers(req) - - client_uuid = api_utils.get_client_uuid(req) - - self._validate.message_listing(**kwargs) - results = self._message_controller.list( - queue_name, - project=project_id, - client_uuid=client_uuid, - **kwargs) - - # Buffer messages - cursor = next(results) - messages = list(cursor) - except (ValueError, api_errors.BadRequest, - validation.ValidationFailed) as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - headers = {'status': 404} - return api_utils.error_response(req, ex, headers) - - if messages: - # Found some messages, so prepare the response - kwargs['marker'] = next(results) - messages = [api_utils.format_message(message) - for message in messages] - - headers = {'status': 200} - body = {'messages': messages} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def message_get(self, req): - """Gets a message from a queue - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - message_id = req._body.get('message_id') - - LOG.debug(u'Message get - message: %(message)s, ' - u'queue: %(queue)s, project: %(project)s', - {'message': message_id, - 'queue': queue_name, - 'project': project_id}) - try: - message = self._message_controller.get( - queue_name, - message_id, - project=project_id) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - headers = {'status': 404} - return api_utils.error_response(req, ex, headers) - - # Prepare response - message = api_utils.format_message(message) - - headers = {'status': 200} - body = {'messages': message} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def message_get_many(self, req): - """Gets a set of messages from a queue - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - message_ids = list(req._body.get('message_ids')) - - LOG.debug(u'Message get - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - try: - self._validate.message_listing(limit=len(message_ids)) - messages = self._message_controller.bulk_get( - queue_name, - message_ids=message_ids, - project=project_id) - except validation.ValidationFailed as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - - # Prepare response - messages = list(messages) - messages = [api_utils.format_message(message) - for message in messages] - - headers = {'status': 200} - body = {'messages': messages} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def message_post(self, req): - """Post a set of messages to a queue - - :param req: Request instance ready to be sent. 
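message_list above consumes a two-step generator from the storage layer: the first next() yields the page of messages, the second yields the marker for the following page, which is echoed back on the next request. That contract, sketched as a client-side iterator (list_messages is an illustrative stand-in, not the deleted controller API):

    def iter_all_messages(list_messages, **kwargs):
        """Walk a marker-paginated listing page by page."""
        while True:
            results = list_messages(**kwargs)   # yields: page, then marker
            page = list(next(results))
            if not page:
                return
            for message in page:
                yield message
            kwargs['marker'] = next(results)    # resume token for the next call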
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - - LOG.debug(u'Messages post - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - messages = req._body.get('messages') - - if messages is None: - ex = _(u'Invalid request.') - error = _(u'No messages were found in the request body.') - headers = {'status': 400} - return api_utils.error_response(req, ex, headers, error) - - try: - # NOTE(flwang): Replace 'exists' with 'get_metadata' won't impact - # the performance since both of them will call - # collection.find_one() - queue_meta = None - try: - queue_meta = self._queue_controller.get_metadata(queue_name, - project_id) - except storage_errors.DoesNotExist as ex: - self._validate.queue_identification(queue_name, project_id) - self._queue_controller.create(queue_name, project=project_id) - # NOTE(flwang): Queue is created in lazy mode, so no metadata - # set. - queue_meta = {} - - queue_max_msg_size = queue_meta.get('_max_messages_post_size', - None) - queue_default_ttl = queue_meta.get('_default_message_ttl') - - # TODO(flwang): To avoid any unexpected regression issue, we just - # leave the _message_post_spec attribute of class as it's. It - # should be removed in Newton release. - if queue_default_ttl: - _message_post_spec = (('ttl', int, queue_default_ttl), - ('body', '*', None),) - else: - _message_post_spec = (('ttl', int, self._defaults.message_ttl), - ('body', '*', None),) - # Place JSON size restriction before parsing - self._validate.message_length(len(str(messages)), - max_msg_post_size=queue_max_msg_size) - except validation.ValidationFailed as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - - try: - messages = api_utils.sanitize(messages, - _message_post_spec, - doctype=list) - except api_errors.BadRequest as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - - try: - client_uuid = api_utils.get_client_uuid(req) - - self._validate.message_posting(messages) - - message_ids = self._message_controller.post( - queue_name, - messages=messages, - project=project_id, - client_uuid=client_uuid) - except (api_errors.BadRequest, validation.ValidationFailed) as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - headers = {'status': 404} - return api_utils.error_response(req, ex, headers) - except storage_errors.MessageConflict as ex: - LOG.exception(ex) - error = _(u'No messages could be enqueued.') - headers = {'status': 500} - return api_utils.error_response(req, ex, headers, error) - - # Prepare the response - headers = {'status': 201} - body = {'message_ids': message_ids} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def message_delete(self, req): - """Delete a message from a queue - - :param req: Request instance ready to be sent. 
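message_post above builds its sanitize spec dynamically so that a queue's _max_messages_post_size and _default_message_ttl metadata, when present, win over the transport-wide defaults. The TTL defaulting itself is simple; a hedged sketch with a simplified stand-in for api_utils.sanitize:

    TRANSPORT_DEFAULT_TTL = 3600                 # illustrative transport default

    def apply_message_defaults(messages, queue_meta):
        """Fill in a missing per-message ttl from queue or transport defaults."""
        default_ttl = (queue_meta.get('_default_message_ttl')
                       or TRANSPORT_DEFAULT_TTL)
        return [{'ttl': int(m.get('ttl', default_ttl)), 'body': m.get('body')}
                for m in messages]

    print(apply_message_defaults([{'body': {'event': 'ping'}}],
                                 {'_default_message_ttl': 120}))
    # -> [{'ttl': 120, 'body': {'event': 'ping'}}]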
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - message_id = req._body.get('message_id') - - LOG.debug(u'Messages item DELETE - message: %(message)s, ' - u'queue: %(queue)s, project: %(project)s', - {'message': message_id, - 'queue': queue_name, - 'project': project_id}) - - claim_id = req._body.get('claim_id') - - try: - self._message_controller.delete( - queue_name, - message_id=message_id, - project=project_id, - claim=claim_id) - except storage_errors.MessageNotClaimed as ex: - LOG.debug(ex) - error = _(u'A claim was specified, but the message ' - u'is not currently claimed.') - headers = {'status': 400} - return api_utils.error_response(req, ex, headers, error) - except storage_errors.ClaimDoesNotExist as ex: - LOG.debug(ex) - error = _(u'The specified claim does not exist or ' - u'has expired.') - headers = {'status': 400} - return api_utils.error_response(req, ex, headers, error) - except storage_errors.NotPermitted as ex: - LOG.debug(ex) - error = _(u'This message is claimed; it cannot be ' - u'deleted without a valid claim ID.') - headers = {'status': 403} - return api_utils.error_response(req, ex, headers, error) - - headers = {'status': 204} - body = {} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def message_delete_many(self, req): - """Deletes a set of messages from a queue - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - message_ids = req._body.get('message_ids') - pop_limit = req._body.get('pop') - - LOG.debug(u'Messages collection DELETE - queue: %(queue)s,' - u'project: %(project)s, messages: %(message_ids)s', - {'queue': queue_name, 'project': project_id, - 'message_ids': message_ids}) - - try: - self._validate.message_deletion(message_ids, pop_limit) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - - if message_ids: - return self._delete_messages_by_id(req, queue_name, message_ids, - project_id) - elif pop_limit: - return self._pop_messages(req, queue_name, project_id, pop_limit) - - @api_utils.on_exception_sends_500 - def _delete_messages_by_id(self, req, queue_name, ids, project_id): - self._message_controller.bulk_delete(queue_name, message_ids=ids, - project=project_id) - - headers = {'status': 204} - body = {} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def _pop_messages(self, req, queue_name, project_id, pop_limit): - - LOG.debug(u'Pop messages - queue: %(queue)s, project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - messages = self._message_controller.pop( - queue_name, - project=project_id, - limit=pop_limit) - - # Prepare response - if not messages: - messages = [] - - headers = {'status': 200} - body = {'messages': messages} - - return response.Response(req, body, headers) - - # Claims - @api_utils.on_exception_sends_500 - def claim_create(self, req): - """Creates a claim - - :param req: Request instance ready to be sent. 
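message_delete_many above accepts two independent inputs: explicit message_ids trigger a bulk delete, while a pop limit atomically removes and returns messages instead. The dispatch, stripped of transport plumbing (the callables are illustrative stand-ins for the controller methods):

    def delete_many(bulk_delete, pop, message_ids=None, pop_limit=None):
        if message_ids:
            bulk_delete(message_ids=message_ids)
            return {'status': 204, 'body': {}}
        if pop_limit:
            return {'status': 200,
                    'body': {'messages': pop(limit=pop_limit) or []}}
        raise ValueError('either message_ids or pop_limit must be given')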
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - - LOG.debug(u'Claims create - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - self._claim_post_spec = ( - ('ttl', int, self._defaults.claim_ttl), - ('grace', int, self._defaults.claim_grace), - ) - - # Claim some messages - - # NOTE(vkmc): We build a dict with the ttl and grace - # This is the metadata the storage is waiting for - kwargs = api_utils.get_headers(req) - # Read claim metadata (e.g., ttl) and raise appropriate - # errors as needed. - metadata = api_utils.sanitize(kwargs, self._claim_post_spec) - - limit = (None if kwargs.get('limit') is None - else kwargs.get('limit')) - - claim_options = {} if limit is None else {'limit': limit} - - try: - self._validate.claim_creation(metadata, limit=limit) - except (ValueError, validation.ValidationFailed) as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - - cid, msgs = self._claim_controller.create( - queue_name, - metadata=metadata, - project=project_id, - **claim_options) - - # Buffer claimed messages - # TODO(vkmc): optimize, along with serialization (below) - resp_msgs = list(msgs) - - # Serialize claimed messages, if any. This logic assumes - # the storage driver returned well-formed messages. - if len(resp_msgs) != 0: - resp_msgs = [api_utils.format_message(msg, cid) - for msg in resp_msgs] - - headers = {'status': 201} - body = {'claim_id': cid, 'messages': resp_msgs} - else: - headers = {'status': 204} - body = {'claim_id': cid} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def claim_get(self, req): - """Gets a claim - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - claim_id = req._body.get('claim_id') - - LOG.debug(u'Claim get - claim: %(claim_id)s, ' - u'queue: %(queue_name)s, project: %(project_id)s', - {'queue_name': queue_name, - 'project_id': project_id, - 'claim_id': claim_id}) - try: - meta, msgs = self._claim_controller.get( - queue_name, - claim_id=claim_id, - project=project_id) - - # Buffer claimed messages - # TODO(vkmc): Optimize along with serialization (see below) - meta['messages'] = list(msgs) - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - error = _('Claim %s does not exist.') % claim_id - headers = {'status': 404} - return api_utils.error_response(req, ex, headers, error) - - # Serialize claimed messages - # TODO(vkmc): Optimize - meta['messages'] = [api_utils.format_message(msg, claim_id) - for msg in meta['messages']] - - del meta['id'] - - headers = {'status': 200} - body = meta - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def claim_update(self, req): - """Updates a claim - - :param req: Request instance ready to be sent. 
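claim_create above answers 201 with the claimed messages, or 204 when nothing was claimable. On the client side the same ttl/grace metadata drives the claim; the benchmark consumer later in this patch uses python-zaqarclient this way, condensed here (the endpoint URL and queue name are assumptions):

    from zaqarclient.queues import client

    cli = client.Client(url='http://localhost:8888', version=2)  # assumed local Zaqar
    queue = cli.queue('demo')

    claim = queue.claim(ttl=300, grace=200, limit=5)  # values mirror the bench defaults
    for msg in claim:
        msg.delete()    # delete while the claim is still held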
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - claim_id = req._body.get('claim_id') - - LOG.debug(u'Claim update - claim: %(claim_id)s, ' - u'queue: %(queue_name)s, project:%(project_id)s', - {'queue_name': queue_name, - 'project_id': project_id, - 'claim_id': claim_id}) - - self._claim_patch_spec = ( - ('ttl', int, self._defaults.claim_ttl), - ('grace', int, self._defaults.claim_grace), - ) - - # Read claim metadata (e.g., TTL) and raise appropriate - # HTTP errors as needed. - metadata = api_utils.sanitize(req._body, self._claim_patch_spec) - - try: - self._validate.claim_updating(metadata) - self._claim_controller.update(queue_name, - claim_id=claim_id, - metadata=metadata, - project=project_id) - headers = {'status': 204} - body = _('Claim %s updated.') % claim_id - return response.Response(req, body, headers) - except validation.ValidationFailed as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - error = _('Claim %s does not exist.') % claim_id - headers = {'status': 404} - return api_utils.error_response(req, ex, headers, error) - - @api_utils.on_exception_sends_500 - def claim_delete(self, req): - """Deletes a claim - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - claim_id = req._body.get('claim_id') - - LOG.debug(u'Claim delete - claim: %(claim_id)s, ' - u'queue: %(queue_name)s, project: %(project_id)s', - {'queue_name': queue_name, - 'project_id': project_id, - 'claim_id': claim_id}) - - self._claim_controller.delete(queue_name, - claim_id=claim_id, - project=project_id) - - headers = {'status': 204} - body = _('Claim %s deleted.') % claim_id - - return response.Response(req, body, headers) - - # Subscriptions - @api_utils.on_exception_sends_500 - def subscription_list(self, req): - """List all subscriptions for a queue. - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - - LOG.debug(u'Subscription list - project: %(project)s', - {'project': project_id}) - - try: - kwargs = api_utils.get_headers(req) - - self._validate.subscription_listing(**kwargs) - results = self._subscription_controller.list( - queue_name, project=project_id, **kwargs) - # Buffer list of subscriptions. Can raise NoPoolFound error. - subscriptions = list(next(results)) - except (ValueError, validation.ValidationFailed) as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - error = 'Subscriptions could not be listed.' - headers = {'status': 503} - return api_utils.error_response(req, ex, headers, error) - - # Got some. Prepare the response. - body = {'subscriptions': subscriptions} - headers = {'status': 200} - - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def subscription_create(self, req, subscriber): - """Create a subscription for a queue. 
- - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - options = req._body.get('options', {}) - ttl = req._body.get('ttl', self._defaults.subscription_ttl) - - LOG.debug( - u'Subscription create - queue: %(queue)s, project: %(project)s', - {'queue': queue_name, - 'project': project_id}) - - try: - url = netutils.urlsplit(subscriber) - mgr = driver.DriverManager('zaqar.notification.tasks', url.scheme, - invoke_on_load=True) - req_data = req._env.copy() - mgr.driver.register(subscriber, options, ttl, project_id, req_data) - - data = {'subscriber': subscriber, - 'options': options, - 'ttl': ttl} - self._validate.subscription_posting(data) - self._validate.queue_identification(queue_name, project_id) - if not self._queue_controller.exists(queue_name, project_id): - self._queue_controller.create(queue_name, project=project_id) - created = self._subscription_controller.create(queue_name, - subscriber, - data['ttl'], - data['options'], - project=project_id) - except validation.ValidationFailed as ex: - LOG.debug(ex) - headers = {'status': 400} - return api_utils.error_response(req, ex, headers) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - error = _('Subscription %s could not be created.') % queue_name - headers = {'status': 503} - return api_utils.error_response(req, ex, headers, error) - else: - if created: - msg = _('Subscription %s created.') % queue_name - body = {'subscription_id': str(created), 'message': msg} - headers = {'status': 201} - else: - body = _('Subscription %s not created.') % queue_name - headers = {'status': 409} - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def subscription_delete(self, req): - """Delete a specific subscription by ID. - - :param req: Request instance ready to be sent. - :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - subscription_id = req._body.get('subscription_id') - - LOG.debug( - u'Subscription delete - queue: %(queue)s, project: %(project)s', - {'queue': queue_name, 'project': project_id}) - try: - self._subscription_controller.delete(queue_name, - subscription_id, - project=project_id) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - error = _('Subscription %(subscription)s for queue %(queue)s ' - 'could not be deleted.') % { - 'subscription': subscription_id, 'queue': queue_name} - headers = {'status': 503} - return api_utils.error_response(req, ex, headers, error) - else: - body = _('Subscription %s removed.') % subscription_id - headers = {'status': 204} - return response.Response(req, body, headers) - - @api_utils.on_exception_sends_500 - def subscription_get(self, req): - """Retrieve details about an existing subscription. - - :param req: Request instance ready to be sent. 
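subscription_create above picks the notification task by the subscriber URL's scheme, so an http:// subscriber and a mailto: subscriber load different stevedore drivers from the same namespace. The lookup in isolation, using the calls from the deleted code (the subscriber value is hypothetical, and loading only succeeds where the zaqar entry points are installed):

    from oslo_utils import netutils
    from stevedore import driver

    subscriber = 'http://example.com/webhook'     # hypothetical subscriber URI
    url = netutils.urlsplit(subscriber)

    # 'zaqar.notification.tasks' is the entry-point namespace used above;
    # url.scheme ('http') selects which task plugin gets loaded.
    mgr = driver.DriverManager('zaqar.notification.tasks',
                               url.scheme,
                               invoke_on_load=True)
    task = mgr.driver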
- :type req: `api.common.Request` - :return: resp: Response instance - :type: resp: `api.common.Response` - """ - project_id = req._headers.get('X-Project-ID') - queue_name = req._body.get('queue_name') - subscription_id = req._body.get('subscription_id') - - LOG.debug(u'Subscription get - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - try: - resp_dict = self._subscription_controller.get(queue_name, - subscription_id, - project=project_id) - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - error = _('Subscription %(subscription)s for queue %(queue)s ' - 'does not exist.') % { - 'subscription': subscription_id, 'queue': queue_name} - headers = {'status': 404} - return api_utils.error_response(req, ex, headers, error) - except storage_errors.ExceptionBase as ex: - LOG.exception(ex) - headers = {'status': 503} - error = _('Cannot retrieve subscription %s.') % subscription_id - return api_utils.error_response(req, ex, headers, error) - else: - body = resp_dict - headers = {'status': 200} - return response.Response(req, body, headers) diff --git a/zaqar/api/v2/request.py b/zaqar/api/v2/request.py deleted file mode 100644 index d7525cf2..00000000 --- a/zaqar/api/v2/request.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from zaqar.api.v1_1 import request as v1_1 -from zaqar.common import consts - - -class RequestSchema(v1_1.RequestSchema): - - headers = v1_1.RequestSchema.headers - schema = v1_1.RequestSchema.schema - - schema.update({ - - # Subscriptions - consts.SUBSCRIPTION_LIST: { - 'properties': { - 'action': {'enum': [consts.SUBSCRIPTION_LIST]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.SUBSCRIPTION_CREATE: { - 'properties': { - 'action': {'enum': [consts.SUBSCRIPTION_CREATE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID']}, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'subscriber': {'type': 'string'}, - 'ttl': {'type': 'integer'}, - 'options': {'type': 'object'}, - }, - 'required': ['queue_name', ], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.SUBSCRIPTION_DELETE: { - 'properties': { - 'action': {'enum': [consts.SUBSCRIPTION_DELETE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'subscription_id': {'type': 'string'}, - }, - 'required': ['queue_name', 'subscription_id'] - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.SUBSCRIPTION_GET: { - 'properties': { - 'action': {'enum': [consts.SUBSCRIPTION_GET]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID'] - }, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'subscription_id': {'type': 'string'}, - }, - 'required': ['queue_name', 'subscription_id'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - - consts.QUEUE_PURGE: { - 'properties': { - 'action': {'enum': [consts.QUEUE_PURGE]}, - 'headers': { - 'type': 'object', - 'properties': headers, - 'required': ['Client-ID', 'X-Project-ID']}, - 'body': { - 'type': 'object', - 'properties': { - 'queue_name': {'type': 'string'}, - 'resource_types': {'type': 'array'}, - }, - 'required': ['queue_name'], - } - }, - 'required': ['action', 'headers', 'body'] - }, - }) diff --git a/zaqar/api/v2/response.py b/zaqar/api/v2/response.py deleted file mode 100644 index c752dece..00000000 --- a/zaqar/api/v2/response.py +++ /dev/null @@ -1,411 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
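The v2 message schema that follows constrains message hrefs with a single regex in which the claim_id query component is optional; note the deleted file still anchors that pattern at /v1/ even though its claim hrefs use /v2/, an inconsistency preserved here exactly as recorded. The pattern can be checked directly with re:

    import re

    # Pattern copied from the message schema below; the claim_id group is optional.
    MESSAGE_HREF = re.compile(
        r'^(/v1/queues/[a-zA-Z0-9_-]{1,64}'
        r'/messages/[a-zA-Z0-9_-]+)(\?claim_id=[a-zA-Z0-9_-]+)?$')

    assert MESSAGE_HREF.match('/v1/queues/demo/messages/5508d23553b2c3a2f3b5d4e9')
    assert MESSAGE_HREF.match(
        '/v1/queues/demo/messages/5508d23553b2c3a2f3b5d4e9?claim_id=1a2b3c')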
- -from zaqar.common.api import api -from zaqar.common import consts - - -class ResponseSchema(api.Api): - - """Define validation schema for json response.""" - - def __init__(self, limits): - self.limits = limits - - age = { - "type": "number", - "minimum": 0 - } - - message = { - "type": "object", - "properties": { - "id": { - "type": "string", - }, - "href": { - "type": "string", - "pattern": "^(/v1/queues/[a-zA-Z0-9_-]{1,64}" - "/messages/[a-zA-Z0-9_-]+)(\?claim_id=[a-zA-Z0-9_-]+)?$" - }, - "age": age, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body", "id"], - "additionalProperties": False, - } - - claim_href = { - "type": "string", - "pattern": "^(/v2/queues/[a-zA-Z0-9_-]{1,64}" - "/messages/[a-zA-Z0-9_-]+)" - "\?claim_id=[a-zA-Z0-9_-]+$" - } - - flavor = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'pattern': '^/v2/flavors/[a-zA-Z0-9_-]{1,64}$' - }, - 'pool': { - 'type': 'string', - }, - 'project': { - 'type': 'string' - }, - 'capabilities': { - 'type': 'object', - 'additionalProperties': True - } - }, - 'required': ['href', 'pool', 'project'], - 'additionalProperties': False, - } - - self.schema = { - consts.MESSAGE_GET_MANY: { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": message, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - } - }, - 'required': ['messages'], - 'additionalProperties': False, - }, - - consts.QUEUE_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string', - 'enum': ['next'], - }, - 'href': { - 'type': 'string', - "pattern": "^/v2/queues\?", - } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False, - }, - 'minItems': 1, - 'maxItems': 1, - }, - - 'queues': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'pattern': '^[a-zA-Z0-9_-]{1,64}$' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v2/queues/' - '[a-zA-Z0-9_-]{1,64}$', - }, - 'metadata': { - 'type': 'object', - } - }, - 'required': ['name', 'href'], - 'additionalProperties': False, - }, - 'minItems': 1, - 'maxItems': self.limits.max_queues_per_page, - } - }, - 'required': ['links', 'queues'], - 'additionalProperties': False, - }, - - consts.QUEUE_GET_STATS: { - 'type': 'object', - 'properties': { - 'messages': { - 'type': 'object', - 'properties': { - 'free': { - 'type': 'number', - 'minimum': 0 - }, - 'claimed': { - 'type': 'number', - 'minimum': 0 - }, - 'total': { - 'type': 'number', - 'minimum': 0 - }, - 'oldest': { - 'type': 'object' - }, - 'newest': { - 'type': 'object' - } - - }, - 'required': ['free', 'claimed', 'total'], - 'additionalProperties': False - } - }, - 'required': ['messages'], - 'additionalProperties': False - }, - - consts.POOL_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v2/pools\?' 
- } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'pools': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'pattern': '^/v2/' - 'pools/[a-zA-Z0-9_-]{1,64}$' - }, - 'weight': { - 'type': 'number', - 'minimum': -1 - }, - 'name': { - 'type': 'string' - }, - 'uri': { - 'type': 'string' - }, - 'group': { - 'type': ['string', 'null'] - }, - 'options': { - 'type': 'object', - 'additionalProperties': True - } - }, - 'required': ['href', 'weight', 'uri', 'group'], - 'additionalProperties': False, - }, - } - }, - 'required': ['links', 'pools'], - 'additionalProperties': False - }, - - consts.MESSAGE_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v2/queues/[a-zA-Z0-9_-]+' - '/messages\?(.)*$' - } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'messages': { - "type": "array", - "items": message, - "minItems": 0, - "maxItems": self.limits.max_messages_per_claim_or_pop - } - } - }, - consts.POOL_GET_DETAIL: { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string' - }, - 'uri': { - 'type': 'string' - }, - 'group': { - 'type': ['string', 'null'] - }, - 'weight': { - 'type': 'number', - 'minimum': -1 - }, - 'href': { - 'type': 'string', - 'pattern': '^/v2/pools/' - '[a-zA-Z0-9_\-]+$' - }, - 'options': { - 'type': 'object', - 'additionalProperties': True - } - }, - 'required': ['uri', 'weight', 'href'], - 'additionalProperties': False - }, - - consts.CLAIM_CREATE: { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - }, - "href": claim_href, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - "age": age, - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body", "id"], - "additionalProperties": False, - }, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - } - }, - 'required': ['messages'], - 'additionalProperties': False - }, - - consts.CLAIM_GET: { - 'type': 'object', - 'properties': { - 'age': age, - 'ttl': { - 'type': 'number', - 'minimum': 0, - 'maximum': self.limits.max_claim_ttl - }, - 'href': { - 'type': 'string', - 'pattern': '^/v2/queues/[a-zA-Z0-9_-]+' - '/claims/[a-zA-Z0-9_-]+$' - }, - 'messages': { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - }, - "href": claim_href, - "ttl": { - "type": "number", - "minimum": 1, - "maximum": self.limits.max_message_ttl - }, - "age": age, - "body": { - "type": "object" - } - }, - "required": ["href", "ttl", "age", "body", "id"], - "additionalProperties": False, - }, - "minItems": 1, - "maxItems": self.limits.max_messages_per_page - } - }, - 'required': ['age', 'ttl', 'messages', 'href'], - 'additionalProperties': False - }, - - consts.FLAVOR_LIST: { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'rel': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'pattern': '^/v2/flavors\?' 
- } - }, - 'required': ['rel', 'href'], - 'additionalProperties': False - } - }, - 'flavors': { - 'type': 'array', - 'items': flavor, - } - }, - 'required': ['links', 'flavors'], - 'additionalProperties': False - } - - } diff --git a/zaqar/bench/__init__.py b/zaqar/bench/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/bench/conductor.py b/zaqar/bench/conductor.py deleted file mode 100644 index fd1b84dc..00000000 --- a/zaqar/bench/conductor.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import json -import multiprocessing as mp -import os -# NOTE(Eva-i): See https://github.com/gevent/gevent/issues/349. Let's keep -# it until the new stable version of gevent(>=1.1) will be released. -os.environ["GEVENT_RESOLVER"] = "ares" - -from zaqar.bench import config -from zaqar.bench import consumer -from zaqar.bench import helpers -from zaqar.bench import observer -from zaqar.bench import producer - -CONF = config.conf - - -def _print_debug_stats(name, stats): - print(name.capitalize()) - print('=' * len(name)) - - values = sorted(stats.items(), key=lambda v: v[0]) - formatted_vals = ['{}: {:.1f}'.format(*v) for v in values] - - print('\n'.join(formatted_vals)) - print() # Blank line - - -def _reset_queues(): - cli = helpers.get_new_client() - for queue_name in helpers.queue_names: - queue = cli.queue(queue_name) - queue.delete() - - -def main(): - CONF(project='zaqar', prog='zaqar-benchmark') - - # NOTE(kgriffs): Reset queues since last time. We don't - # clean them up after the performance test, in case - # the user wants to examine the state of the system. - if not CONF.skip_queue_reset: - if CONF.debug: - print('Resetting queues...') - - _reset_queues() - - downstream_queue = mp.Queue() - procs = [mp.Process(target=worker.run, args=(downstream_queue,)) - for worker in [producer, consumer, observer]] - - for each_proc in procs: - each_proc.start() - - for each_proc in procs: - each_proc.join() - - stats = {} - for each_proc in procs: - stats.update(downstream_queue.get_nowait()) - - if CONF.debug: - print() - - for name in ('producer', 'observer', 'consumer'): - stats_group = stats[name] - - # Skip disabled workers - if not stats_group['duration_sec']: - continue - - _print_debug_stats(name, stats_group) - - else: - stats['params'] = { - 'producer': { - 'processes': CONF.producer_processes, - 'workers': CONF.producer_workers - }, - 'consumer': { - 'processes': CONF.consumer_processes, - 'workers': CONF.consumer_workers - }, - 'observer': { - 'processes': CONF.observer_processes, - 'workers': CONF.observer_workers - }, - } - - print(json.dumps(stats)) diff --git a/zaqar/bench/config.py b/zaqar/bench/config.py deleted file mode 100644 index 5d4959d2..00000000 --- a/zaqar/bench/config.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. 
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -conf = cfg.CONF -_CLI_OPTIONS = ( - cfg.IntOpt( - 'producer_processes', - short='pp', - default=1, - help='Number of Producer Processes'), - cfg.IntOpt( - 'producer_workers', - short='pw', - default=10, - help='Number of Producer Workers'), - - cfg.IntOpt( - 'consumer_processes', - short='cp', - default=1, - help='Number of Consumer Processes'), - cfg.IntOpt( - 'consumer_workers', - short='cw', - default=0, - help='Number of Consumer Workers'), - - cfg.IntOpt( - 'observer_processes', - short='op', - default=1, - help='Number of Observer Processes'), - cfg.IntOpt( - 'observer_workers', - short='ow', - default=5, - help='Number of Observer Workers'), - - cfg.BoolOpt('debug', default=True, - help=('Tag to indicate if print the details of running.')), - - cfg.FloatOpt('api_version', short='api', default='2', - help='Zaqar API version to use'), - - cfg.IntOpt('messages_per_claim', short='cno', default=5, - help=('Number of messages the consumer will attempt to ' - 'claim at a time')), - cfg.IntOpt('messages_per_list', short='lno', default=5, - help=('Number of messages the observer will attempt to ' - 'list at a time')), - - cfg.IntOpt('time', short='t', default=5, - help="Duration of the performance test, in seconds"), - - cfg.StrOpt('server_url', short='s', default='http://localhost:8888'), - - cfg.StrOpt('queue_prefix', short='q', default='ogre-test-queue'), - cfg.IntOpt('num_queues', short='qno', default=4), - - cfg.StrOpt('messages_path', short='m'), - - cfg.BoolOpt('skip_queue_reset', default=False, - help=('Do not reset queues before running' - 'the performance test')), -) -conf.register_cli_opts(_CLI_OPTIONS) diff --git a/zaqar/bench/consumer.py b/zaqar/bench/consumer.py deleted file mode 100644 index 9479d135..00000000 --- a/zaqar/bench/consumer.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
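The benchmark configuration above is ordinary oslo.config CLI registration, which is what lets conductor.main() call CONF(project='zaqar', prog='zaqar-benchmark') and then read flags such as --producer_processes (-pp). A trimmed, self-contained sketch of the same mechanism:

    from oslo_config import cfg

    conf = cfg.CONF
    conf.register_cli_opts([
        cfg.IntOpt('producer_processes', short='pp', default=1,
                   help='Number of Producer Processes'),
        cfg.BoolOpt('debug', default=True,
                    help='Print run details.'),
    ])

    conf(project='zaqar', prog='zaqar-benchmark')   # parses sys.argv
    if conf.debug:
        print('producer processes:', conf.producer_processes)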
- -from __future__ import division -from __future__ import print_function - -import multiprocessing as mp -import random -import sys -import time - -from gevent import monkey as curious_george -curious_george.patch_all(thread=False, select=False) -import gevent -import marktime -from zaqarclient.transport import errors - -from zaqar.bench import config -from zaqar.bench import helpers - -CONF = config.conf - - -def claim_delete(queues, stats, test_duration, ttl, grace, limit): - """Consumer Worker - - The Consumer Worker continuously claims and deletes messages - for the specified duration. The time taken for each claim and - delete is recorded for calculating throughput and latency. - """ - - end = time.time() + test_duration - claim_total_elapsed = 0 - delete_total_elapsed = 0 - total_failed_requests = 0 - claim_total_requests = 0 - delete_total_requests = 0 - - while time.time() < end: - # NOTE(kgriffs): Distribute requests across all queues evenly. - queue = random.choice(queues) - - try: - marktime.start('claim_message') - - claim = queue.claim(ttl=ttl, grace=grace, limit=limit) - - claim_total_elapsed += marktime.stop('claim_message').seconds - claim_total_requests += 1 - - except errors.TransportError as ex: - sys.stderr.write("Could not claim messages : {0}\n".format(ex)) - total_failed_requests += 1 - - else: - for msg in claim: - try: - marktime.start('delete_message') - - msg.delete() - - elapsed = marktime.stop('delete_message').seconds - delete_total_elapsed += elapsed - delete_total_requests += 1 - - except errors.TransportError as ex: - msg = "Could not delete messages: {0}\n".format(ex) - sys.stderr.write(msg) - total_failed_requests += 1 - - total_requests = (claim_total_requests + - delete_total_requests + - total_failed_requests) - - stats.put({ - 'total_requests': total_requests, - 'claim_total_requests': claim_total_requests, - 'delete_total_requests': delete_total_requests, - 'claim_total_elapsed': claim_total_elapsed, - 'delete_total_elapsed': delete_total_elapsed, - }) - - -def load_generator(stats, num_workers, num_queues, - test_duration, url, ttl, grace, limit): - - cli = helpers.get_new_client() - queues = [] - for queue_name in helpers.queue_names: - queues.append(cli.queue(queue_name)) - - gevent.joinall([ - gevent.spawn(claim_delete, - queues, stats, test_duration, ttl, grace, limit) - - for _ in range(num_workers) - ]) - - -def crunch(stats): - total_requests = 0 - claim_total_elapsed = 0.0 - delete_total_elapsed = 0.0 - claim_total_requests = 0 - delete_total_requests = 0 - - while not stats.empty(): - entry = stats.get_nowait() - total_requests += entry['total_requests'] - claim_total_elapsed += entry['claim_total_elapsed'] - delete_total_elapsed += entry['delete_total_elapsed'] - claim_total_requests += entry['claim_total_requests'] - delete_total_requests += entry['delete_total_requests'] - - return (total_requests, claim_total_elapsed, delete_total_elapsed, - claim_total_requests, delete_total_requests) - - -def run(upstream_queue): - num_procs = CONF.consumer_processes - num_workers = CONF.consumer_workers - num_queues = CONF.num_queues - - # Stats that will be reported - duration = 0 - total_requests = 0 - successful_requests = 0 - claim_total_requests = 0 - delete_total_requests = 0 - throughput = 0 - claim_latency = 0 - delete_latency = 0 - - # Performance test - if num_procs and num_workers: - stats = mp.Queue() - # TODO(TheSriram) : Make ttl and grace configurable - args = (stats, num_workers, num_queues, CONF.time, CONF.server_url, - 300, 200, 
CONF.messages_per_claim) - - procs = [mp.Process(target=load_generator, args=args) - for _ in range(num_procs)] - - if CONF.debug: - print('\nStarting consumers (cp={0}, cw={1})...'.format( - num_procs, num_workers)) - - start = time.time() - - for each_proc in procs: - each_proc.start() - - for each_proc in procs: - each_proc.join() - - (total_requests, claim_total_elapsed, delete_total_elapsed, - claim_total_requests, delete_total_requests) = crunch(stats) - - successful_requests = claim_total_requests + delete_total_requests - duration = time.time() - start - - # NOTE(kgriffs): Duration should never be zero - throughput = successful_requests / duration - - if claim_total_requests: - claim_latency = (1000 * claim_total_elapsed / - claim_total_requests) - - if delete_total_requests: - delete_latency = (1000 * delete_total_elapsed / - delete_total_requests) - - upstream_queue.put({ - 'consumer': { - 'duration_sec': duration, - 'total_reqs': total_requests, - 'claim_total_requests': claim_total_requests, - 'successful_reqs': successful_requests, - 'messages_processed': delete_total_requests, - 'reqs_per_sec': throughput, - 'ms_per_claim': claim_latency, - 'ms_per_delete': delete_latency, - } - }) diff --git a/zaqar/bench/helpers.py b/zaqar/bench/helpers.py deleted file mode 100644 index 2117008e..00000000 --- a/zaqar/bench/helpers.py +++ /dev/null @@ -1,135 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE(Eva-i): Some code was taken from python-zaqarclient. - -import os -import sys - -import os_client_config -from zaqarclient.queues import client - -from zaqar.bench import config - -CONF = config.conf - - -def _get_credential_args(): - """Retrieves credential arguments for keystone - - Credentials are either read via os-client-config from the environment - or from a config file ('clouds.yaml'). Environment variables take - precedence over values from the config file. - - devstack produces a clouds.yaml with two named clouds - one named - 'devstack' which has user privs and one named 'devstack-admin' which - has admin privs. This function will default to getting the credentials from - environment variables. If not all required credentials are present in - the environment variables, it tries to get credentials for the - 'devstack-admin' cloud in clouds.yaml. If no 'devstack-admin' cloud is - found, it tries to get credentials for the 'devstack' cloud. If no - 'devstack' cloud is found, it prints an error and stops the application.
- """ - os_cfg = os_client_config.OpenStackConfig() - - cloud = os_cfg.get_one_cloud() - cred_args = cloud.get_auth_args() - - cred_args['insecure'] = cloud.auth.get('insecure') - cred_args['cacert'] = cloud.auth.get('cacert') - cred_args['token'] = cloud.auth.get('token') - - required_options = ['username', 'password', 'auth_url', 'project_name'] - if not all(arg in cred_args for arg in required_options): - try: - cloud = os_cfg.get_one_cloud(cloud='devstack-admin') - except Exception: - try: - cloud = os_cfg.get_one_cloud(cloud='devstack') - except Exception: - print("Insufficient amount of credentials found for keystone " - "authentication. Credentials should reside either in " - "environment variables or in 'clouds.yaml' file. If " - "both present, the ones in environment variables will " - "be preferred. Exiting.") - sys.exit() - cred_args = cloud.get_auth_args() - - print("Using '{}' credentials".format(cloud.name)) - return cred_args - - -def _generate_client_conf(): - auth_strategy = os.environ.get('OS_AUTH_STRATEGY', 'noauth') - - if auth_strategy == 'keystone': - args = _get_credential_args() - conf = { - 'auth_opts': { - 'backend': 'keystone', - 'options': { - 'os_username': args.get('username'), - 'os_password': args.get('password'), - 'os_project_name': args['project_name'], - 'os_auth_url': args['auth_url'], - 'insecure': args.get('insecure'), - 'cacert': args.get('cacert'), - 'auth_token': args.get('token') - }, - }, - } - else: - conf = { - 'auth_opts': { - 'backend': 'noauth', - 'options': { - 'os_project_id': 'my-lovely-benchmark', - }, - }, - } - print("Using '{0}' authentication method".format(conf['auth_opts'] - ['backend'])) - return conf - - -class LazyAPIVersion(object): - def __init__(self): - self.api_version = None - - @property - def get(self): - if self.api_version is None: - conversion_map = { - 1.0: 1, - 1.1: 1.1, - 2.0: 2, - } - try: - self.api_version = conversion_map[CONF.api_version] - except KeyError: - print("Unknown Zaqar API version: '{}'. Exiting...".format( - CONF.api_version)) - sys.exit() - print("Benchmarking Zaqar API v{0}...".format(self.api_version)) - return self.api_version - - -client_conf = _generate_client_conf() -client_api = LazyAPIVersion() -queue_names = [] -for i in range(CONF.num_queues): - queue_names.append((CONF.queue_prefix + '-' + str(i))) - - -def get_new_client(): - return client.Client(CONF.server_url, client_api.get, conf=client_conf) diff --git a/zaqar/bench/observer.py b/zaqar/bench/observer.py deleted file mode 100644 index 861eacde..00000000 --- a/zaqar/bench/observer.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
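helpers.py above reduces client construction to a plain dict of auth options plus a server URL. Stripped down to the default noauth path, the same pattern looks like this (a sketch only: it assumes python-zaqarclient is installed and a Zaqar server is listening on the default bench URL):

    from zaqarclient.queues import client

    conf = {
        'auth_opts': {
            'backend': 'noauth',
            'options': {'os_project_id': 'my-lovely-benchmark'},
        },
    }

    # The integer 2 is what LazyAPIVersion above maps API version 2.0 to.
    cli = client.Client('http://localhost:8888', 2, conf=conf)

    # Same calls the bench workers make: get a queue handle, post a message.
    queue = cli.queue('ogre-test-queue-0')
    queue.post({'ttl': 60, 'body': {'evt': 'Single'}})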
- -from __future__ import division -from __future__ import print_function - -import multiprocessing as mp -import random -import sys -import time - -from gevent import monkey as curious_george -curious_george.patch_all(thread=False, select=False) -import gevent -import marktime -from six.moves import urllib -from zaqarclient.transport import errors - -from zaqar.bench import config -from zaqar.bench import helpers - -CONF = config.conf - - -# -# TODO(kgriffs): Factor out the common code from producer, consumer -# and worker (DRY all the things!) -# - - -def _extract_marker(links): - for link in links: - if link['rel'] == 'next': - href = link['href'] - break - - query = urllib.parse.urlparse(href).query - params = urllib.parse.parse_qs(query) - return params['marker'][0] - - -def observer(queues, stats, test_duration, limit): - """Observer Worker - - The observer lists messages without claiming them. - """ - - end = time.time() + test_duration - - total_elapsed = 0 - total_succeeded = 0 - total_failed = 0 - - queues = [{'q': q, 'm': None} for q in queues] - - while time.time() < end: - # NOTE(kgriffs): Distribute requests across all queues evenly. - queue = random.choice(queues) - - try: - marktime.start('list_messages') - cursor = queue['q'].messages(limit=limit, marker=queue['m'], - include_claimed=True) - total_elapsed += marktime.stop('list_messages').seconds - total_succeeded += 1 - - messages = list(cursor) - - if messages: - # TODO(kgriffs): Figure out a less hacky way to do this - # while preserving the ability to measure elapsed time - # per request. - queue['m'] = _extract_marker(cursor._links) - - except errors.TransportError as ex: - sys.stderr.write("Could not list messages : {0}\n".format(ex)) - total_failed += 1 - - total_requests = total_succeeded + total_failed - - stats.put({ - 'total_requests': total_requests, - 'total_succeeded': total_succeeded, - 'total_elapsed': total_elapsed, - }) - - -def load_generator(stats, num_workers, num_queues, - test_duration, limit): - - cli = helpers.get_new_client() - queues = [] - for queue_name in helpers.queue_names: - queues.append(cli.queue(queue_name)) - - gevent.joinall([ - gevent.spawn(observer, - queues, stats, test_duration, limit) - - for _ in range(num_workers) - ]) - - -def crunch(stats): - total_requests = 0 - total_succeeded = 0 - total_elapsed = 0.0 - - while not stats.empty(): - entry = stats.get_nowait() - total_requests += entry['total_requests'] - total_succeeded += entry['total_succeeded'] - total_elapsed += entry['total_elapsed'] - - return total_requests, total_succeeded, total_elapsed - - -def run(upstream_queue): - num_procs = CONF.observer_processes - num_workers = CONF.observer_workers - num_queues = CONF.num_queues - - # Stats that will be reported - duration = 0 - total_requests = 0 - total_succeeded = 0 - throughput = 0 - latency = 0 - - # Performance test - if num_procs and num_workers: - test_duration = CONF.time - stats = mp.Queue() - args = (stats, num_workers, num_queues, test_duration, - CONF.messages_per_list) - - procs = [mp.Process(target=load_generator, args=args) - for _ in range(num_procs)] - - if CONF.debug: - print('\nStarting observer (op={0}, ow={1})...'.format( - num_procs, num_workers)) - - start = time.time() - - for each_proc in procs: - each_proc.start() - - for each_proc in procs: - each_proc.join() - - (total_requests, total_succeeded, total_elapsed) = crunch(stats) - - duration = time.time() - start - - throughput = total_succeeded / duration - - if total_succeeded: - latency = (1000 
* total_elapsed / total_succeeded) - - upstream_queue.put({ - 'observer': { - 'duration_sec': duration, - 'total_reqs': total_requests, - 'successful_reqs': total_succeeded, - 'reqs_per_sec': throughput, - 'ms_per_req': latency, - } - }) diff --git a/zaqar/bench/producer.py b/zaqar/bench/producer.py deleted file mode 100644 index 62c5998f..00000000 --- a/zaqar/bench/producer.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import division -from __future__ import print_function - -import json -import multiprocessing as mp -import random -import sys -import time - -from gevent import monkey as curious_george -curious_george.patch_all(thread=False, select=False) -import gevent -import marktime -from zaqarclient.transport import errors - -from zaqar.bench import config -from zaqar.bench import helpers - -CONF = config.conf - - -def choose_message(message_pool): - """Choose a message from our pool of possibilities.""" - - # Assume message_pool is sorted by weight, ascending - position = random.random() - accumulator = 0.00 - - for each_message in message_pool: - accumulator += each_message['weight'] - if position < accumulator: - return each_message['doc'] - - assert False - - -def load_messages(): - default_file_name = 'zaqar-benchmark-messages.json' - messages_path = CONF.messages_path or CONF.find_file(default_file_name) - if messages_path: - with open(messages_path) as f: - message_pool = json.load(f) - message_pool.sort(key=lambda msg: msg['weight']) - return message_pool - else: - return [{"weight": 1.0, - "doc": {"ttl": 60, - "body": {"id": "7FA23C90-62F7-40D2-9360-FBD5D7D61CD1", - "evt": "Single"}}}] - - -def producer(queues, message_pool, stats, test_duration): - """Producer Worker - - The Producer Worker continuously post messages for - the specified duration. The time taken for each post - is recorded for calculating throughput and latency. - """ - - total_requests = 0 - successful_requests = 0 - total_elapsed = 0 - end = time.time() + test_duration - - while time.time() < end: - queue = random.choice(queues) - - try: - marktime.start('post_message') - - queue.post(choose_message(message_pool)) - - total_elapsed += marktime.stop('post_message').seconds - successful_requests += 1 - - except errors.TransportError as ex: - sys.stderr.write("Could not post a message : {0}\n".format(ex)) - - total_requests += 1 - - stats.put({ - 'successful_requests': successful_requests, - 'total_requests': total_requests, - 'total_elapsed': total_elapsed - }) - - -# TODO(TheSriram): make distributed across multiple machines -# TODO(TheSriram): post across several queues (which workers to which queues? -# weight them, so can have some busy queues, some not.) 
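choose_message() above does weighted sampling by walking the cumulative weights until a uniform random draw falls inside a bucket. The logic can be checked standalone (toy pool; as in load_messages(), the weights are assumed to sum to 1.0 and to be sorted ascending):

    import random


    def choose_message(message_pool):
        # Walk the cumulative distribution until the draw lands in a bucket.
        position = random.random()
        accumulator = 0.00
        for each_message in message_pool:
            accumulator += each_message['weight']
            if position < accumulator:
                return each_message['doc']
        assert False


    pool = [{'weight': 0.2, 'doc': 'rare'}, {'weight': 0.8, 'doc': 'common'}]
    counts = {'rare': 0, 'common': 0}
    for _ in range(10000):
        counts[choose_message(pool)] += 1

    print(counts)  # roughly {'rare': 2000, 'common': 8000}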
-def load_generator(stats, num_workers, num_queues, test_duration): - - cli = helpers.get_new_client() - queues = [] - for queue_name in helpers.queue_names: - queues.append(cli.queue(queue_name)) - - message_pool = load_messages() - - gevent.joinall([ - gevent.spawn(producer, - queues, message_pool, stats, test_duration) - - for _ in range(num_workers) - ]) - - -def crunch(stats): - total_requests = 0 - total_latency = 0.0 - successful_requests = 0 - - while not stats.empty(): - entry = stats.get_nowait() - total_requests += entry['total_requests'] - total_latency += entry['total_elapsed'] - successful_requests += entry['successful_requests'] - - return successful_requests, total_requests, total_latency - - -def run(upstream_queue): - num_procs = CONF.producer_processes - num_workers = CONF.producer_workers - num_queues = CONF.num_queues - - duration = 0 - total_requests = 0 - successful_requests = 0 - throughput = 0 - latency = 0 - - if num_procs and num_workers: - test_duration = CONF.time - stats = mp.Queue() - args = (stats, num_workers, num_queues, test_duration) - - # TODO(TheSriram): Multiple test runs, vary num workers and - # drain/delete queues in between each run. Plot these on a - # graph, with concurrency as the X axis. - - procs = [ - mp.Process(target=load_generator, args=args) - for _ in range(num_procs) - ] - - if CONF.debug: - print('\nStarting producer (pp={0}, pw={1})...'.format( - num_procs, num_workers)) - - start = time.time() - - for each_proc in procs: - each_proc.start() - - for each_proc in procs: - each_proc.join() - - successful_requests, total_requests, total_latency = crunch(stats) - - duration = time.time() - start - - # NOTE(kgriffs): Duration should never be zero - throughput = successful_requests / duration - - if successful_requests: - latency = 1000 * total_latency / successful_requests - - upstream_queue.put({ - 'producer': { - 'duration_sec': duration, - 'total_reqs': total_requests, - 'successful_reqs': successful_requests, - 'reqs_per_sec': throughput, - 'ms_per_req': latency - } - }) diff --git a/zaqar/bootstrap.py b/zaqar/bootstrap.py deleted file mode 100644 index 91d47dcc..00000000 --- a/zaqar/bootstrap.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import socket - -from oslo_log import log -from osprofiler import opts as profiler_opts -from osprofiler import profiler -from stevedore import driver - -from zaqar.api import handler -from zaqar.common import cache as oslo_cache -from zaqar.common import configs -from zaqar.common import consts -from zaqar.common import decorators -from zaqar.common import errors -from zaqar.storage import pipeline -from zaqar.storage import pooling -from zaqar.storage import utils as storage_utils -from zaqar.transport import base -from zaqar.transport.middleware import profile -from zaqar.transport import validation - -LOG = log.getLogger(__name__) - - -class Bootstrap(object): - """Defines the Zaqar bootstrapper. 
- - The bootstrap loads up drivers per a given configuration, and - manages their lifetimes. - """ - - def __init__(self, conf): - self.conf = conf - - for group, opts in configs._config_options(): - self.conf.register_opts(opts, group=group) - profiler_opts.set_defaults(self.conf) - - # TODO(wangxiyuan): Now the OSprofiler feature in Zaqar only support - # wsgi. Websocket part will be added in the future. - profile.setup(self.conf, 'Zaqar-server', socket.gethostname()) - - self.driver_conf = self.conf[configs._DRIVER_GROUP] - - @decorators.lazy_property(write=False) - def api(self): - LOG.debug(u'Loading API handler') - validate = validation.Validator(self.conf) - defaults = base.ResourceDefaults(self.conf) - return handler.Handler(self.storage, self.control, validate, defaults) - - @decorators.lazy_property(write=False) - def storage(self): - LOG.debug(u'Loading storage driver') - if self.conf.pooling: - LOG.debug(u'Storage pooling enabled') - storage_driver = pooling.DataDriver(self.conf, self.cache, - self.control) - if self.conf.profiler.enabled: - storage_driver = profiler.trace_cls("pooling_data_" - "driver")(storage_driver) - else: - storage_driver = storage_utils.load_storage_driver( - self.conf, self.cache, control_driver=self.control) - - LOG.debug(u'Loading storage pipeline') - return pipeline.DataDriver(self.conf, storage_driver, - self.control) - - @decorators.lazy_property(write=False) - def control(self): - LOG.debug(u'Loading storage control driver') - return storage_utils.load_storage_driver(self.conf, self.cache, - control_mode=True) - - @decorators.lazy_property(write=False) - def cache(self): - LOG.debug(u'Loading proxy cache driver') - try: - oslo_cache.register_config(self.conf) - return oslo_cache.get_cache(self.conf) - except RuntimeError as exc: - LOG.exception(exc) - raise errors.InvalidDriver(exc) - - @decorators.lazy_property(write=False) - def transport(self): - transport_name = self.driver_conf.transport - LOG.debug(u'Loading transport driver: %s', transport_name) - - if transport_name == consts.TRANSPORT_WEBSOCKET: - args = [self.conf, self.api, self.cache] - else: - args = [ - self.conf, - self.storage, - self.cache, - self.control, - ] - - try: - mgr = driver.DriverManager('zaqar.transport', - transport_name, - invoke_on_load=True, - invoke_args=args) - return mgr.driver - except RuntimeError as exc: - LOG.exception(exc) - LOG.error(u'Failed to load transport driver zaqar.transport.' - u'%(driver)s with args %(args)s', - {'driver': transport_name, 'args': args}) - raise errors.InvalidDriver(exc) - - def run(self): - self.transport.listen() diff --git a/zaqar/cmd/__init__.py b/zaqar/cmd/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/cmd/gc.py b/zaqar/cmd/gc.py deleted file mode 100644 index 767fd1a1..00000000 --- a/zaqar/cmd/gc.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
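Bootstrap above is essentially a graph of memoized properties: no driver is constructed until first access, and accessing storage builds the cache and control drivers on demand. A toy sketch of that memoization pattern (illustrative names only; the real class uses decorators.lazy_property, defined later in zaqar/common/decorators.py):

    class LazyBootstrap(object):
        def __init__(self):
            self._built = {}

        def _lazy(self, name, factory):
            # Build on first access, then memoize on the instance.
            if name not in self._built:
                print('loading {0}'.format(name))
                self._built[name] = factory()
            return self._built[name]

        @property
        def cache(self):
            return self._lazy('cache', dict)

        @property
        def storage(self):
            # Touching storage transitively builds the cache first.
            return self._lazy('storage', lambda: {'cache': self.cache})


    boot = LazyBootstrap()
    boot.storage   # first access: prints 'loading storage', 'loading cache'
    boot.storage   # second access: nothing is rebuilt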
- -from oslo_config import cfg -from oslo_log import log - -from zaqar import bootstrap -from zaqar.common import cli - -LOG = log.getLogger(__name__) - - -# In this first approach it's the responsibility of the operator -# to call the garbage collector manually. Using crontab or a similar -# tool is advised. -@cli.runnable -def run(): - # Use the global CONF instance - conf = cfg.CONF - conf(project='zaqar', prog='zaqar-gc') - - server = bootstrap.Bootstrap(conf) - - LOG.debug(u'Calling the garbage collector') - server.storage.gc() diff --git a/zaqar/cmd/server.py b/zaqar/cmd/server.py deleted file mode 100644 index ae1673f3..00000000 --- a/zaqar/cmd/server.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from oslo_config import cfg -from oslo_log import log -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -from zaqar import bootstrap -from zaqar.common import cli -from zaqar.common import configs -from zaqar import version - -# NOTE(eggmaster): define command line options for zaqar-server -_CLI_OPTIONS = ( - configs._ADMIN_MODE_OPT, - cfg.BoolOpt('daemon', default=False, - help='Run Zaqar server in the background.'), -) - - -@cli.runnable -def run(): - # Use the global CONF instance - conf = cfg.CONF - gmr_opts.set_defaults(conf) - # NOTE(eggmaster): register command line options for zaqar-server - conf.register_cli_opts(_CLI_OPTIONS) - log.register_options(conf) - - # NOTE(jeffrey4l): Overwrite the default value for - # logging_context_format_string. Add project_id into it. - conf.set_default('logging_context_format_string', - '%(asctime)s.%(msecs)03d %(process)d %(levelname)s' - ' %(name)s [%(request_id)s %(user_identity)s]' - ' [project_id:%(project_id)s] %(message)s') - conf(project='zaqar', prog='zaqar-server') - log.setup(conf, 'zaqar') - - gmr.TextGuruMeditation.setup_autorun(version, conf=conf) - - server = bootstrap.Bootstrap(conf) - - # The following code is to daemonize zaqar-server to avoid - # an issue with wsgiref writing to stdout/stderr when we don't - # want it to. This is specifically needed to allow zaqar to - # run under devstack, but it may also be useful for other scenarios. - # Open /dev/zero and /dev/null for redirection. - # Daemonizing zaqar-server is needed *just* when running under devstack - # and when zaqar is invoked with the `daemon` command line option. - if conf.daemon: - zerofd = os.open('/dev/zero', os.O_RDONLY) - nullfd = os.open('/dev/null', os.O_WRONLY) - - # Redirect stdin, stdout and stderr away from the terminal - os.dup2(zerofd, 0) - os.dup2(nullfd, 1) - os.dup2(nullfd, 2) - - # Detach the process context; this requires two forks.
- try: - pid = os.fork() - if pid > 0: - os._exit(0) - except OSError: - os._exit(1) - - try: - pid = os.fork() - if pid > 0: - os._exit(0) - except OSError: - os._exit(2) - server.run() diff --git a/zaqar/common/__init__.py b/zaqar/common/__init__.py deleted file mode 100644 index f8d8c812..00000000 --- a/zaqar/common/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Code common to Zaqar""" - -from zaqar.common import pipeline - -Pipeline = pipeline.Pipeline diff --git a/zaqar/common/access.py b/zaqar/common/access.py deleted file mode 100644 index 0fb89514..00000000 --- a/zaqar/common/access.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# TODO(cpp-cabrera): port to enum34 when that becomes available -class Access(object): - """An enumeration to represent access levels for APIs.""" - public = 1 - admin = 2 diff --git a/zaqar/common/api/__init__.py b/zaqar/common/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/common/api/api.py b/zaqar/common/api/api.py deleted file mode 100644 index e6f4b10a..00000000 --- a/zaqar/common/api/api.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import jsonschema -from jsonschema import validators -from oslo_log import log - -from zaqar.common import errors -from zaqar.i18n import _ - -LOG = log.getLogger(__name__) - - -class Api(object): - - schema = {} - validators = {} - - def get_schema(self, action): - """Returns the schema for an action - - :param action: Action for which params need - to be validated. 
- :type action: `six.text_type` - - :returns: Action's schema - :rtype: dict - - :raises InvalidAction: if the action does not exist - """ - - try: - return self.schema[action] - except KeyError: - msg = _('{0} is not a valid action').format(action) - raise errors.InvalidAction(msg) - - def validate(self, action, body): - """Validates the request data - - This method relies on jsonschema and exists - just as a way for third-party transport to validate - the request. It's not recommended to validate every - request since they are already validated server side. - - :param action: Action's for which body need - to be validated. - :type action: `six.text_type` - :param body: Params to validate - :type body: dict - - :returns: True if the schema is valid, False otherwise - :raises InvalidAction: if the action does not exist - """ - - if action not in self.validators: - schema = self.get_schema(action) - self.validators[action] = validators.Draft4Validator(schema) - - try: - self.validators[action].validate(body) - except jsonschema.ValidationError as ex: - LOG.debug('Schema validation failed. %s.', str(ex)) - return False - - return True diff --git a/zaqar/common/api/errors.py b/zaqar/common/api/errors.py deleted file mode 100644 index f63a1d1d..00000000 --- a/zaqar/common/api/errors.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class ExceptionBase(Exception): - - msg_format = '' - - def __init__(self, **kwargs): - msg = self.msg_format.format(**kwargs) - super(ExceptionBase, self).__init__(msg) - - -class BadRequest(ExceptionBase): - """Raised when an invalid request is received.""" - - msg_format = u'Bad request. {description}' - - def __init__(self, description): - """Initializes the error with contextual information. - - :param description: Error description - """ - - super(BadRequest, self).__init__(description=description) - - -class DocumentTypeNotSupported(ExceptionBase): - """Raised when the content of a request has an unsupported format.""" diff --git a/zaqar/common/api/request.py b/zaqar/common/api/request.py deleted file mode 100644 index 3760281f..00000000 --- a/zaqar/common/api/request.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class Request(object): - """General data for a Zaqar request - - Transport will generate a request object and send to this the API to be - processed. 
- :param action: Action to identify the API call being processed, - i.e: 'get_queues', 'get_messages' - :type action: str - :param body: Request's body. Default: None - :type body: str - :param headers: Request headers. Default: None - :type headers: dict - :param api: Api entry point. i.e: 'queues.v1' - :type api: `six.text_type`. - :param env: Request environment. Default: None - :type env: dict - """ - - def __init__(self, action, - body=None, headers=None, api=None, env=None): - self._action = action - self._body = body - self._headers = headers or {} - self._api = api - self._env = env or {} - - def get_request(self): - return {'action': self._action, - 'body': self._body, - 'headers': self._headers, - 'api': self._api} diff --git a/zaqar/common/api/response.py b/zaqar/common/api/response.py deleted file mode 100644 index a843a013..00000000 --- a/zaqar/common/api/response.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class Response(object): - """Common response class for Zaqar. - - All `zaqar.transport.base.Transport` implementations - will return this to the higher level API which will then build - an object out of it. - - :param request: The request sent to the server. - :type request: `zaqar.transport.request.Request` - :param body: Response's body - :type body: `six.string_types` - :param headers: Optional headers returned in the response. - :type headers: dict - """ - - __slots__ = ('_request', '_body', '_headers') - - def __init__(self, request, body, headers=None): - self._request = request - self._body = body - self._headers = headers or {} - - def get_response(self): - return {'request': self._request.get_request(), - 'body': self._body, - 'headers': self._headers} diff --git a/zaqar/common/api/schemas/__init__.py b/zaqar/common/api/schemas/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/common/api/schemas/flavors.py b/zaqar/common/api/schemas/flavors.py deleted file mode 100644 index 254d936a..00000000 --- a/zaqar/common/api/schemas/flavors.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""flavors: JSON schema for zaqar-queues flavors resources.""" - -# NOTE(flaper87): capabilities can be anything. These will be unique to -# each storage driver, so we don't perform any further validation at -# the transport layer. 
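Api.validate() above compiles each schema into a cached Draft4Validator and turns validation failures into a boolean result. The same jsonschema calls can be exercised standalone against a toy schema in the style of the resource schemas that follow (assumes only that the jsonschema package is installed):

    import jsonschema
    from jsonschema import validators

    schema = {
        'type': 'object',
        'properties': {'pool': {'type': 'string'}},
        'required': ['pool'],
        'additionalProperties': False,
    }

    # Compile once, reuse for every request, as Api.validate() does.
    validator = validators.Draft4Validator(schema)

    try:
        validator.validate({'pool': 'pool-1'})
        print('valid')
    except jsonschema.ValidationError as ex:
        print('invalid: {0}'.format(ex))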
-patch_capabilities = { - 'type': 'object', - 'properties': { - 'capabilities': { - 'type': 'object' - } - } -} - -# NOTE(flaper87): a string valid -patch_pool = { - 'type': 'object', - 'properties': { - 'pool': { - 'type': 'string' - }, - 'additionalProperties': False - } -} - -patch_pool_group = { - 'type': 'object', - 'properties': { - 'pool_group': { - 'type': 'string' - }, - 'additionalProperties': False - } -} - -create = { - 'type': 'object', - 'properties': { - 'pool_group': patch_pool_group['properties']['pool_group'], - 'pool': patch_pool['properties']['pool'], - 'capabilities': patch_capabilities['properties']['capabilities'] - }, - # NOTE(flaper87): capabilities need not be present. Storage drivers - # must provide reasonable defaults. - # NOTE(wanghao): remove pool in Newton release. - 'oneOf': [{'required': ['pool_group']}, {'required': ['pool']}], - 'additionalProperties': False -} diff --git a/zaqar/common/api/schemas/pools.py b/zaqar/common/api/schemas/pools.py deleted file mode 100644 index ec9eee4d..00000000 --- a/zaqar/common/api/schemas/pools.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""pools: JSON schema for zaqar-queues pools resources.""" - -# NOTE(cpp-cabrera): options can be anything. These will be unique to -# each storage driver, so we don't perform any further validation at -# the transport layer. -patch_options = { - 'type': 'object', 'properties': { - 'options': { - 'type': 'object' - } - } -} - -patch_uri = { - 'type': 'object', 'properties': { - 'uri': { - 'type': 'string', - 'minLength': 0, - 'maxLength': 255, - 'format': 'uri' - }, - 'additionalProperties': False - } -} - -patch_group = { - 'type': 'object', 'properties': { - 'uri': { - 'type': 'string', - 'minLength': 0, - 'maxLength': 255 - }, - 'additionalProperties': False - } -} - - -patch_weight = { - 'type': 'object', 'properties': { - 'weight': { - 'type': 'integer', 'minimum': 0, 'maximum': 2**32 - 1 - }, - 'additionalProperties': False - } -} - -create = { - 'type': 'object', 'properties': { - 'weight': patch_weight['properties']['weight'], - 'group': patch_group['properties']['uri'], - 'uri': patch_uri['properties']['uri'], - 'options': patch_options['properties']['options'] - }, - # NOTE(cpp-cabrera): options need not be present. Storage drivers - # must provide reasonable defaults. - 'required': ['uri', 'weight'], - 'additionalProperties': False -} diff --git a/zaqar/common/api/utils.py b/zaqar/common/api/utils.py deleted file mode 100644 index 22560ac2..00000000 --- a/zaqar/common/api/utils.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import functools -import uuid - -from oslo_log import log as logging -from oslo_utils import strutils - -import zaqar.common.api.errors as api_errors -import zaqar.common.api.response as response -from zaqar.i18n import _ - -LOG = logging.getLogger(__name__) - - -def sanitize(document, spec=None, doctype=dict): - """Validates a document and drops undesired fields. - - :param document: A dict to verify according to `spec`. - :param spec: (Default None) Iterable describing expected fields, - yielding tuples with the form of: - - (field_name, value_type, default_value) - - Note that value_type may either be a Python type, or the - special string '*' to accept any type. default_value is the - default to give the field if it is missing, or None to require - that the field be present. - - If spec is None, the incoming documents will not be validated. - :param doctype: type of document to expect; must be either - JSONObject or JSONArray. - :raises DocumentTypeNotSupported: if document type is not supported - :raises TypeError: if document type is neither a JSONObject - nor JSONArray - :returns: A sanitized, filtered version of the document. If the - document is a list of objects, each object will be filtered - and returned in a new list. If, on the other hand, the document - is expected to contain a single object, that object's fields will - be filtered and the resulting object will be returned. - """ - - if doctype is dict: - if not isinstance(document, dict): - raise api_errors.DocumentTypeNotSupported() - - return document if spec is None else filter_fields(document, spec) - - if doctype is list: - if not isinstance(document, list): - raise api_errors.DocumentTypeNotSupported() - - if spec is None: - return document - - return [filter_fields(obj, spec) for obj in document] - - raise TypeError(_(u'Doctype must be either a JSONObject or JSONArray')) - - -def filter_fields(document, spec): - """Validates and retrieves typed fields from a single document. - - Sanitizes a dict-like document by checking it against a - list of field spec, and returning only those fields - specified. - - :param document: dict-like object - :param spec: iterable describing expected fields, yielding - tuples with the form of: (field_name, value_type). Note that - value_type may either be a Python type, or the special - string '*' to accept any type. - :raises BadRequest: if any field is missing or not an - instance of the specified type - :returns: A filtered dict containing only the fields - listed in the spec - """ - - filtered = {} - for name, value_type, default_value in spec: - filtered[name] = get_checked_field(document, name, - value_type, default_value) - - return filtered - - -def get_checked_field(document, name, value_type, default_value): - """Validates and retrieves a typed field from a document. - - This function attempts to look up doc[name], and raises - appropriate errors if the field is missing or not an - instance of the given type. 
- - :param document: dict-like object - :param name: field name - :param value_type: expected value type, or '*' to accept any type - :param default_value: Default value to use if the value is missing, - or None to make the value required. - :raises BadRequest: if the field is missing or not an - instance of value_type - :returns: value obtained from doc[name] - """ - - try: - value = document[name] - except KeyError: - if default_value is not None: - value = default_value - else: - description = _(u'Missing "{name}" field.').format(name=name) - raise api_errors.BadRequest(description) - - # PERF(kgriffs): We do our own little spec thing because it is way - # faster than jsonschema. - if value_type == '*' or isinstance(value, value_type): - return value - - description = _(u'The value of the "{name}" field must be a {vtype}.') - description = description.format(name=name, vtype=value_type.__name__) - raise api_errors.BadRequest(description) - - -def get_client_uuid(req): - """Read a required Client-ID from a request. - - :param req: Request object - :raises BadRequest: if the Client-ID header is missing or - does not represent a valid UUID - :returns: A UUID object - """ - - try: - return uuid.UUID(req._headers.get('Client-ID')) - except ValueError: - description = _(u'Malformed hexadecimal UUID.') - raise api_errors.BadRequest(description) - - -def get_headers(req): - kwargs = {} - - # TODO(vkmc) We should add a control here to make sure - # that the headers/request combination is possible - # e.g. we cannot have messages_post with grace - - if req._body.get('marker') is not None: - kwargs['marker'] = req._body.get('marker') - - if req._body.get('limit') is not None: - kwargs['limit'] = int(req._body.get('limit')) - - if req._body.get('detailed') is not None: - kwargs['detailed'] = strutils.bool_from_string( - req._body.get('detailed')) - - if req._body.get('echo') is not None: - kwargs['echo'] = strutils.bool_from_string(req._body.get('echo')) - - if req._body.get('include_claimed') is not None: - kwargs['include_claimed'] = strutils.bool_from_string( - req._body.get('include_claimed')) - - if req._body.get('ttl') is not None: - kwargs['ttl'] = int(req._body.get('ttl')) - - if req._body.get('grace') is not None: - kwargs['grace'] = int(req._body.get('grace')) - - return kwargs - - -def on_exception_sends_500(func): - """Handles generic Exceptions in API endpoints - - This decorator catches generic Exceptions and returns a generic - Response. - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as ex: - LOG.exception(ex) - error = _("Unexpected error.") - headers = {'status': 500} - # args[0] - Endpoints object, args[1] - Request object. - req = args[1] - return error_response(req, ex, headers, error) - - return wrapper - - -def error_response(req, exception, headers=None, error=None): - body = {'exception': str(exception), 'error': error} - resp = response.Response(req, body, headers) - return resp - - -def format_message(message, claim_id=None): - return { - 'id': message['id'], - 'claim_id': claim_id, - 'ttl': message['ttl'], - 'age': message['age'], - 'body': message['body'], - } diff --git a/zaqar/common/auth.py b/zaqar/common/auth.py deleted file mode 100644 index c593b041..00000000 --- a/zaqar/common/auth.py +++ /dev/null @@ -1,95 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from keystoneauth1 import loading -from keystoneauth1 import session -from keystoneclient.v3 import client -from oslo_config import cfg - - -PASSWORD_PLUGIN = 'password' -TRUSTEE_CONF_GROUP = 'trustee' -KEYSTONE_AUTHTOKEN_GROUP = 'keystone_authtoken' -loading.register_auth_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP) -loading.register_auth_conf_options(cfg.CONF, KEYSTONE_AUTHTOKEN_GROUP) -_ZAQAR_ENDPOINTS = {} - - -def _config_options(): - trustee_opts = loading.get_auth_common_conf_options() - trustee_opts.extend(loading.get_auth_plugin_conf_options(PASSWORD_PLUGIN)) - yield TRUSTEE_CONF_GROUP, trustee_opts - - -def get_trusted_token(trust_id): - """Return a Keystone token using the given trust_id.""" - auth_plugin = loading.load_auth_from_conf_options( - cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=trust_id) - - trust_session = session.Session(auth=auth_plugin) - return trust_session.auth.get_access(trust_session).auth_token - - -def _get_admin_session(conf_group): - auth_plugin = loading.load_auth_from_conf_options( - cfg.CONF, conf_group) - return session.Session(auth=auth_plugin) - - -def _get_user_client(auth_plugin): - sess = session.Session(auth=auth_plugin) - return client.Client(session=sess) - - -def create_trust_id(auth_plugin, trustor_user_id, trustor_project_id, roles, - expires_at): - """Create a trust with the given user for the configured trustee user.""" - admin_session = _get_admin_session(TRUSTEE_CONF_GROUP) - trustee_user_id = admin_session.get_user_id() - - client = _get_user_client(auth_plugin) - trust = client.trusts.create(trustor_user=trustor_user_id, - trustee_user=trustee_user_id, - project=trustor_project_id, - impersonation=True, - role_names=roles, - expires_at=expires_at) - return trust.id - - -def get_public_endpoint(): - """Get Zaqar's public endpoint from keystone""" - global _ZAQAR_ENDPOINTS - - if _ZAQAR_ENDPOINTS: - return _ZAQAR_ENDPOINTS - - zaqar_session = _get_admin_session(KEYSTONE_AUTHTOKEN_GROUP) - auth = zaqar_session.auth - if not auth: - return _ZAQAR_ENDPOINTS - - catalogs = auth.get_auth_ref(zaqar_session).service_catalog - try: - _ZAQAR_ENDPOINTS['zaqar'] = catalogs.url_for(service_name='zaqar') - except Exception: - pass - try: - _ZAQAR_ENDPOINTS['zaqar-websocket'] = catalogs.url_for( - service_name='zaqar-websocket') - except Exception: - pass - - return _ZAQAR_ENDPOINTS diff --git a/zaqar/common/cache.py b/zaqar/common/cache.py deleted file mode 100644 index e65bd309..00000000 --- a/zaqar/common/cache.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
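The trust workflow in auth.py above rests on a small keystoneauth1 pattern: register auth options for a config group, load an auth plugin from that group, and wrap it in a session. A sketch of just that pattern (not runnable as-is: it assumes a populated [trustee] section with valid Keystone credentials in the loaded configuration):

    from keystoneauth1 import loading
    from keystoneauth1 import session
    from oslo_config import cfg

    loading.register_auth_conf_options(cfg.CONF, 'trustee')

    auth_plugin = loading.load_auth_from_conf_options(cfg.CONF, 'trustee')
    sess = session.Session(auth=auth_plugin)

    # With valid credentials configured, this round-trips to Keystone.
    print(sess.get_token())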
- -from oslo_cache import core - - -def register_config(conf): - core.configure(conf) - - -def get_cache(conf): - region = core.create_region() - return core.configure_cache_region(conf, region) diff --git a/zaqar/common/cli.py b/zaqar/common/cli.py deleted file mode 100644 index f5693941..00000000 --- a/zaqar/common/cli.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -from __future__ import print_function -import functools -import sys - -from oslo_config import cfg -from oslo_log import log as logging - - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -def _fail(returncode, ex): - """Handles terminal errors. - - :param returncode: process return code to pass to sys.exit - :param ex: the error that occurred - """ - - print(ex, file=sys.stderr) - - LOG.exception(ex) - sys.exit(returncode) - - -def runnable(func): - """Entry point wrapper. - - Note: This call blocks until the process is killed - or interrupted. - """ - - @functools.wraps(func) - def _wrapper(): - try: - logging.register_options(CONF) - logging.setup(CONF, 'zaqar') - func() - except KeyboardInterrupt: - LOG.info(u'Terminating') - except Exception as ex: - _fail(1, ex) - - return _wrapper diff --git a/zaqar/common/configs.py b/zaqar/common/configs.py deleted file mode 100644 index 5fcbdf82..00000000 --- a/zaqar/common/configs.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from oslo_config import cfg - - -_ADMIN_MODE_OPT = cfg.BoolOpt('admin_mode', default=False, - help='Activate privileged endpoints.') - - -_GENERAL_OPTIONS = ( - _ADMIN_MODE_OPT, - cfg.BoolOpt('pooling', default=False, - help=('Enable pooling across multiple storage backends. 
' - 'If pooling is enabled, the storage driver ' - 'configuration is used to determine where the ' - 'catalogue/control plane data is kept.'), - deprecated_opts=[cfg.DeprecatedOpt('sharding')]), - cfg.BoolOpt('unreliable', default=False, - help='Disable all reliability constraints.'), - cfg.ListOpt('enable_deprecated_api_versions', default=[], - item_type=cfg.types.List(item_type=cfg.types.String( - choices=('1', '1.1'))), - help='List of deprecated API versions to enable.'), -) - -_DRIVER_OPTIONS = ( - cfg.StrOpt('transport', default='wsgi', - help='Transport driver to use.'), - cfg.StrOpt('message_store', default='mongodb', - deprecated_opts=[cfg.DeprecatedOpt('storage')], - help='Storage driver to use as the messaging store.'), - cfg.StrOpt('management_store', default='mongodb', - help='Storage driver to use as the management store.'), -) - -_DRIVER_GROUP = 'drivers' - - -_SIGNED_URL_OPTIONS = ( - cfg.StrOpt('secret_key', - help=('Secret key used to encrypt pre-signed URLs.')), -) - -_SIGNED_URL_GROUP = 'signed_url' - - -_NOTIFICATION_OPTIONS = ( - cfg.StrOpt('smtp_command', default='/usr/sbin/sendmail -t -oi', - help=('The command of smtp to send email. The format is ' - '"command_name arg1 arg2".')), - cfg.IntOpt('max_notifier_workers', default=10, - help='The max amount of the notification workers.'), - cfg.BoolOpt('require_confirmation', default=False, - help='Whether the http/https/email subscription need to be ' - 'confirmed before notification.'), - cfg.StrOpt('external_confirmation_url', - help='The confirmation page url that will be used in email ' - 'subscription confirmation before notification.'), - cfg.DictOpt("subscription_confirmation_email_template", - default={'topic': 'Zaqar Notification - Subscription ' - 'Confirmation', - 'body': 'You have chosen to subscribe to the ' - 'queue: {0}. This queue belongs to ' - 'project: {1}. ' - 'To confirm this subscription, ' - 'click or visit this link below: {2}', - 'sender': 'Zaqar Notifications ' - ''}, - help="Defines the set of subscription confirmation email " - "content, including topic, body and sender. There is " - "a mapping is {0} -> queue name, {1} ->project id, " - "{2}-> confirm url in body string. User can use any of " - "the three value. But they can't use more than three."), - cfg.DictOpt("unsubscribe_confirmation_email_template", - default={'topic': 'Zaqar Notification - ' - 'Unsubscribe Confirmation', - 'body': 'You have unsubscribed successfully to the ' - 'queue: {0}. This queue belongs to ' - 'project: {1}. ' - 'To resubscribe this subscription, ' - 'click or visit this link below: {2}', - 'sender': 'Zaqar Notifications ' - ''}, - help="Defines the set of unsubscribe confirmation email " - "content, including topic, body and sender. There is " - "a mapping is {0} -> queue name, {1} ->project id, " - "{2}-> confirm url in body string. User can use any of " - "the three value. But they can't use more than three."), -) - -_NOTIFICATION_GROUP = 'notification' - - -_PROFILER_OPTIONS = [ - cfg.BoolOpt("trace_wsgi_transport", default=False, - help="If False doesn't trace any transport requests." 
- "Please note that it doesn't work for websocket now."), - cfg.BoolOpt("trace_message_store", default=False, - help="If False doesn't trace any message store requests."), - cfg.BoolOpt("trace_management_store", default=False, - help="If False doesn't trace any management store requests.") -] - -_PROFILER_GROUP = "profiler" - - -def _config_options(): - return [(None, _GENERAL_OPTIONS), - (_DRIVER_GROUP, _DRIVER_OPTIONS), - (_SIGNED_URL_GROUP, _SIGNED_URL_OPTIONS), - (_NOTIFICATION_GROUP, _NOTIFICATION_OPTIONS), - (_PROFILER_GROUP, _PROFILER_OPTIONS)] diff --git a/zaqar/common/consts.py b/zaqar/common/consts.py deleted file mode 100644 index 96cb1adb..00000000 --- a/zaqar/common/consts.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -TRANSPORT_DRIVERS = ( - TRANSPORT_WSGI, TRANSPORT_WEBSOCKET, -) = ( - 'wsgi', 'websocket', -) - -MESSAGE_STORE = ( - MSG_STORE_MONGODB, -) = ( - 'mongodb', -) - -MANAGEMENT_STORE = ( - MGMT_STORE_MONGODB, -) = ( - 'mongodb', -) - -SUBSCRIPTION_OPS = ( - SUBSCRIPTION_CREATE, - SUBSCRIPTION_LIST, - SUBSCRIPTION_GET, - SUBSCRIPTION_DELETE, -) = ( - 'subscription_create', - 'subscription_list', - 'subscription_get', - 'subscription_delete', -) - -MESSAGE_OPS = ( - MESSAGE_POST, - MESSAGE_LIST, - MESSAGE_GET, - MESSAGE_GET_MANY, - MESSAGE_DELETE, - MESSAGE_DELETE_MANY, -) = ( - 'message_post', - 'message_list', - 'message_get', - 'message_get_many', - 'message_delete', - 'message_delete_many', -) - -QUEUE_OPS = ( - QUEUE_CREATE, - QUEUE_LIST, - QUEUE_GET, - QUEUE_DELETE, - QUEUE_GET_STATS, - QUEUE_PURGE -) = ( - 'queue_create', - 'queue_list', - 'queue_get', - 'queue_delete', - 'queue_get_stats', - 'queue_purge' -) - -CLAIM_OPS = ( - CLAIM_CREATE, - CLAIM_GET, - CLAIM_UPDATE, - CLAIM_DELETE, -) = ( - 'claim_create', - 'claim_get', - 'claim_update', - 'claim_delete', -) - -POOL_OPS = ( - POOL_CREATE, - POOL_LIST, - POOL_GET, - POOL_GET_DETAIL, - POOL_UPDATE, - POOL_DELETE, -) = ( - 'pool_create', - 'pool_list', - 'pool_get', - 'pool_get_detail', - 'pool_update', - 'pool_delete', -) - -FLAVOR_OPS = ( - FLAVOR_CREATE, - FLAVOR_LIST, - FLAVOR_GET, - FLAVOR_UPDATE, - FLAVOR_DELETE, -) = ( - 'flavor_create', - 'flavor_list', - 'flavor_get', - 'flavor_update', - 'flavor_delete', -) diff --git a/zaqar/common/decorators.py b/zaqar/common/decorators.py deleted file mode 100644 index 660bc0e7..00000000 --- a/zaqar/common/decorators.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import functools - -import msgpack -from oslo_cache import core -from oslo_log import log as logging -from oslo_serialization import jsonutils - - -LOG = logging.getLogger(__name__) - - -class TransportLog(object): - """Standard logging for transport driver responders - - This class implements a logging decorator that the transport driver - responders can use for standard logging - """ - - def __init__(self, resource_type): - self.resource_type = resource_type - - def __call__(self, func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - # The below line takes function names like 'on_get' and 'on_patch' - # and returns 'GET' and 'PATCH' respectively, so we do not need - # the name of the HTTP method to be passed. - method = func.__name__[3:].upper() - LOG.debug(u'%(type)s %(method)s: %(arguments)s', - {'type': self.resource_type, - 'method': method, - 'arguments': jsonutils.dumps(kwargs)}) - return func(*args, **kwargs) - - return wrapper - - -def memoized_getattr(meth): - """Memoizes attributes returned by __getattr__ - - It can be used to remember the results from - __getattr__ and reduce the debt of calling - it again when the same attribute is accessed. - - This decorator memoizes attributes by setting - them on the object itself. - - The wrapper returned by this decorator won't alter - the returned value. - - :returns: A wrapper around the decorated method. - """ - - @functools.wraps(meth) - def wrapper(self, method_name): - attr = meth(self, method_name) - setattr(self, method_name, attr) - return attr - return wrapper - - -def caches(keygen, ttl, cond=None): - """Flags a getter method as being cached using oslo_cache. - - It is assumed that the containing class defines an attribute - named `_cache` that is an instance of an oslo_cache backend. - - The getter should raise an exception if the value can't be - loaded, which will skip the caching step. Otherwise, the - getter must return a value that can be encoded with - msgpack. - - Note that you can also flag a remover method such that it - will purge an associated item from the cache, e.g.:: - - def project_cache_key(user, project=None): - return user + ':' + str(project) - - class Project(object): - def __init__(self, db, cache): - self._db = db - self._cache = cache - - @decorators.caches(project_cache_key, 60) - def get_project(self, user, project=None): - return self._db.get_project(user, project) - - @get_project.purges - def del_project(self, user, project=None): - self._db.delete_project(user, project) - - :param keygen: A static key generator function. This function - must accept the same arguments as the getter, sans `self`. - :param ttl: TTL for the cache entry, in seconds. - :param cond: Conditional for whether or not to cache the - value. Must be a function that takes a single value, and - returns True or False. 
- """ - - def purges_prop(remover): - - @functools.wraps(remover) - def wrapper(self, *args, **kwargs): - # First, purge from cache - key = keygen(*args, **kwargs) - self._cache.delete(key) - - # Remove/delete from origin - remover(self, *args, **kwargs) - - return wrapper - - def prop(getter): - - @functools.wraps(getter) - def wrapper(self, *args, **kwargs): - key = keygen(*args, **kwargs) - packed_value = self._cache.get(key, expiration_time=ttl) - - if packed_value is core.NO_VALUE: - value = getter(self, *args, **kwargs) - - # Cache new value if desired - if cond is None or cond(value): - # NOTE(kgriffs): Setting use_bin_type is essential - # for being able to distinguish between Unicode - # and binary strings when decoding; otherwise, - # both types are normalized to the MessagePack - # str format family. - packed_value = msgpack.packb(value, use_bin_type=True) - - self._cache.set(key, packed_value) - else: - # NOTE(kgriffs): unpackb does not default to UTF-8, - # so we have to explicitly ask for it. - value = msgpack.unpackb(packed_value, encoding='utf-8') - - return value - - wrapper.purges = purges_prop - return wrapper - - return prop - - -def lazy_property(write=False, delete=True): - """Creates a lazy property. - - :param write: Whether this property is "writable" - :param delete: Whether this property can be deleted. - """ - - def wrapper(fn): - attr_name = '_lazy_' + fn.__name__ - - def getter(self): - if not hasattr(self, attr_name): - setattr(self, attr_name, fn(self)) - return getattr(self, attr_name) - - def setter(self, value): - setattr(self, attr_name, value) - - def deleter(self): - delattr(self, attr_name) - - return property(fget=getter, - fset=write and setter, - fdel=delete and deleter, - doc=fn.__doc__) - return wrapper - - -def api_version_manager(version_info): - """Manage API versions based on their status - - This decorator disables `DEPRECATED` APIs by default unless the user - explicitly enables it by adding it to the `enable_deprecated_api_versions` - configuration option. - - :param version_info: Dictionary containing the API version info. - """ - api_version = version_info['id'] - api_updated = version_info['updated'] - deprecated = version_info['status'] == 'DEPRECATED' - - def wrapper(fn): - @functools.wraps(fn) - def register_api(driver, conf): - if (deprecated and - [api_version] not in conf.enable_deprecated_api_versions): - return None - - if deprecated: - LOG.warning('Enabling API version %(version)s. ' - 'This version was marked as deprecated in ' - '%(updated)s. Using it may expose security ' - 'issues, unexpected behavior or damage your ' - 'data.', {'version': api_version, - 'updated': api_updated}) - return fn(driver, conf) - return register_api - return wrapper diff --git a/zaqar/common/errors.py b/zaqar/common/errors.py deleted file mode 100644 index 34bc146c..00000000 --- a/zaqar/common/errors.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-
-class InvalidDriver(Exception):
-    """A driver was not found or loaded."""
-
-
-class PatternNotFound(Exception):
-    """A string did not match the expected pattern or regex."""
-
-
-class InvalidAction(Exception):
-    """Raised when a nonexistent action is attempted."""
-
-
-class ConfigurationError(Exception):
-    """An invalid value was used for a Zaqar configuration option."""
diff --git a/zaqar/common/pipeline.py b/zaqar/common/pipeline.py
deleted file mode 100644
index a2e85c6f..00000000
--- a/zaqar/common/pipeline.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright (c) 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-"""This module implements a common Pipeline object.
-
-The pipeline can be used to enhance the storage layer with filtering, routing,
-multiplexing and the like. For example:
-
-    >>> stages = [MessageFilter(), EncryptionFilter(), QueueController()]
-    >>> pipeline = Pipeline(stages)
-
-Every stage has to implement the method it wants to hook into. This method
-will be called when the pipeline consumption gets to that point - stage
-ordering matters - and will continue unless the method call returns a value
-that is not None.
-
-At least one of the stages has to implement the calling method. If none of
-them do, an AttributeError exception will be raised.
-"""
-
-import contextlib
-
-from oslo_log import log as logging
-import six
-
-from zaqar.common import decorators
-from zaqar.i18n import _
-
-LOG = logging.getLogger(__name__)
-
-
-class Pipeline(object):
-
-    def __init__(self, pipeline=None):
-        self._pipeline = list(pipeline) if pipeline else []
-
-    @decorators.memoized_getattr
-    def __getattr__(self, name):
-        with self.consumer_for(name) as consumer:
-            return consumer
-
-    @contextlib.contextmanager
-    def consumer_for(self, method):
-        """Creates a closure for `method`
-
-        This method creates a closure to consume the pipeline
-        for `method`.
-
-        :param method: The method name to call on each stage
-        :type method: `six.text_type`
-
-        :returns: A callable to consume the pipeline.
-        """
-
-        def consumer(*args, **kwargs):
-            """Consumes the pipeline for `method`
-
-            This function walks through the pipeline and calls
-            `method` for each of the items in the pipeline. A
-            warning will be logged for each stage not implementing
-            `method`, and an AttributeError will be raised if
-            none of the stages do.
-
-            :param args: Positional arguments to pass to the call.
-            :param kwargs: Keyword arguments to pass to the call.
-
-            :raises AttributeError: if none of the stages implement `method`
-            """
-            # NOTE(flaper87): Used as a way to verify
-            # the requested method exists in at least
-            # one of the stages, otherwise AttributeError
-            # will be raised.
- target = None - result = None - - for stage in self._pipeline: - try: - target = getattr(stage, method) - except AttributeError: - sstage = six.text_type(stage) - msgtmpl = _(u"Stage %(stage)s does not " - "implement %(method)s") - LOG.debug(msgtmpl, {'stage': sstage, 'method': method}) - continue - - tmp = target(*args, **kwargs) - - # NOTE(flaper87): preserve the last, not None, result - if tmp is not None: - result = tmp - - # NOTE(flaper87): Will keep going forward - # through the stageline unless the call returns - # something. - if result is not None: - return result - - if target is None: - msg = _(u'Method %s not found in any of ' - 'the registered stages') % method - LOG.error(msg) - raise AttributeError(msg) - - yield consumer diff --git a/zaqar/common/storage/__init__.py b/zaqar/common/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/common/storage/select.py b/zaqar/common/storage/select.py deleted file mode 100644 index 842066c5..00000000 --- a/zaqar/common/storage/select.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""select: a collection of algorithms for choosing an entry from a -collection.""" - -import random - - -def weighted(objs, key='weight', generator=random.randint): - """Perform a weighted select given a list of objects. - - :param objs: a list of objects containing at least the field `key` - :type objs: [dict] - :param key: the field in each obj that corresponds to weight - :type key: six.text_type - :param generator: a number generator taking two ints - :type generator: function(int, int) -> int - :return: an object - :rtype: dict - """ - acc = 0 - lookup = [] - - # construct weighted spectrum - for o in objs: - # NOTE(cpp-cabrera): skip objs with 0 weight - if o[key] <= 0: - continue - acc += o[key] - lookup.append((o, acc)) - - # no objects were found - if not lookup: - return None - - # NOTE(cpp-cabrera): select an object from the lookup table. If - # the selector lands in the interval [lower, upper), then choose - # it. - gen = generator - selector = gen(0, acc - 1) - lower = 0 - for obj, upper in lookup: - if lower <= selector < upper: - return obj - lower = upper diff --git a/zaqar/common/transport/__init__.py b/zaqar/common/transport/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/common/transport/wsgi/__init__.py b/zaqar/common/transport/wsgi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/common/transport/wsgi/helpers.py b/zaqar/common/transport/wsgi/helpers.py deleted file mode 100644 index aa8a87b5..00000000 --- a/zaqar/common/transport/wsgi/helpers.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
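To make the selection logic of weighted() above concrete, here is a small deterministic sketch; the pool dicts are made up, and a fixed generator stands in for random.randint so the outcome is reproducible:

    pools = [{'name': 'a', 'weight': 1},
             {'name': 'b', 'weight': 3}]

    # The weighted spectrum is 'a' -> [0, 1) and 'b' -> [1, 4),
    # so a selector of 2 lands on 'b'.
    chosen = weighted(pools, generator=lambda lo, hi: 2)
    assert chosen['name'] == 'b'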
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""wsgi transport helpers.""" - -from distutils import version -import re -import uuid - -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import urls -from zaqar import context -from zaqar.i18n import _ -from zaqar.transport import validation - - -LOG = logging.getLogger(__name__) - - -def verify_pre_signed_url(key, req, resp, params): - headers = req.headers - project = headers.get('X-PROJECT-ID') - expires = headers.get('URL-EXPIRES') - methods = headers.get('URL-METHODS', '').split(',') - paths = headers.get('URL-PATHS', '').split(',') - signature = headers.get('URL-SIGNATURE') - - if not signature: - return - - if req.method not in methods: - raise falcon.HTTPNotFound() - - # Support to query single resource with pre-signed url - if not any([p for p in paths if re.search(p, req.path)]): - raise falcon.HTTPNotFound() - - try: - verified = urls.verify_signed_headers_data(key, paths, - project=project, - methods=methods, - expires=expires, - signature=signature) - except ValueError: - raise falcon.HTTPNotFound() - - if not verified: - raise falcon.HTTPNotFound() - - -def get_client_uuid(req): - """Read a required Client-ID from a request. - - :param req: A falcon.Request object - :raises HTTPBadRequest: if the Client-ID header is missing or - does not represent a valid UUID - :returns: A UUID object - """ - - try: - return uuid.UUID(req.get_header('Client-ID', required=True)) - - except ValueError: - description = _(u'Malformed hexadecimal UUID.') - raise falcon.HTTPBadRequest('Wrong UUID value', description) - - -def extract_project_id(req, resp, params): - """Adds `project_id` to the list of params for all responders - - Meant to be used as a `before` hook. - - :param req: request sent - :type req: falcon.request.Request - :param resp: response object to return - :type resp: falcon.response.Response - :param params: additional parameters passed to responders - :type params: dict - :rtype: None - """ - api_version_string = req.path.split('/')[1] - params['project_id'] = req.get_header('X-PROJECT-ID') - if not api_version_string: - # NOTE(jaosorior): The versions resource is public and shouldn't need - # a check for the project-id. - return - if params['project_id'] == "": - raise falcon.HTTPBadRequest('Empty project header not allowed', - _(u'X-PROJECT-ID cannot be an empty ' - u'string. Specify the right header ' - u'X-PROJECT-ID and retry.')) - - api_version = version.LooseVersion(api_version_string) - if (not params['project_id'] and api_version >= - version.LooseVersion('v1.1')): - raise falcon.HTTPBadRequest('Project-Id Missing', - _(u'The header X-PROJECT-ID was missing')) - - -def require_client_id(req, resp, params): - """Makes sure the header `Client-ID` is present in the request - - Use as a before hook. 
- :param req: request sent - :type req: falcon.request.Request - :param resp: response object to return - :type resp: falcon.response.Response - :param params: additional parameters passed to responders - :type params: dict - :rtype: None - """ - - if req.path.startswith('/v1.1/') or req.path.startswith('/v2/'): - # NOTE(flaper87): `get_client_uuid` already raises 400 - # it the header is missing. - get_client_uuid(req) - - -def validate_queue_identification(validate, req, resp, params): - """Hook for validating the queue name and project id in requests. - - The queue name validation is short-circuited if 'queue_name' does - not exist in `params`. - - This hook depends on the `get_project` hook, which must be - installed upstream. - - - :param validate: A validator function that will - be used to check the queue name against configured - limits. functools.partial or a closure must be used to - set this first arg, and expose the remaining ones as - a Falcon hook interface. - :param req: Falcon request object - :param resp: Falcon response object - :param params: Responder params dict - """ - - try: - validate(params['queue_name'], - params['project_id']) - except KeyError: - # NOTE(kgriffs): queue_name not in params, so nothing to do - pass - except validation.ValidationFailed: - project = params['project_id'] - queue = params['queue_name'] - if six.PY2: - queue = queue.decode('utf-8', 'replace') - - LOG.debug(u'Invalid queue name "%(queue)s" submitted for ' - u'project: %(project)s', - {'queue': queue, 'project': project}) - - raise falcon.HTTPBadRequest(_(u'Invalid queue identification'), - _(u'The format of the submitted queue ' - u'name or project id is not valid.')) - - -def require_accepts_json(req, resp, params): - """Raises an exception if the request does not accept JSON - - Meant to be used as a `before` hook. - - :param req: request sent - :type req: falcon.request.Request - :param resp: response object to return - :type resp: falcon.response.Response - :param params: additional parameters passed to responders - :type params: dict - :rtype: None - :raises HTTPNotAcceptable: if the request does not accept JSON - """ - if not req.client_accepts('application/json'): - raise falcon.HTTPNotAcceptable( - u''' -Endpoint only serves `application/json`; specify client-side -media type support with the "Accept" header.''', - href=u'http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html', - href_text=u'14.1 Accept, Hypertext Transfer Protocol -- HTTP/1.1') - - -def require_content_type_be_non_urlencoded(req, resp, params): - """Raises an exception on "x-www-form-urlencoded" content type of request. - - If request has body and "Content-Type" header has - "application/x-www-form-urlencoded" value (case-insensitive), this function - raises falcon.HTTPBadRequest exception. - - This strange function exists only to prevent bug/1547100 in a backward - compatible way. - - Meant to be used as a `before` hook. 
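A wiring sketch for the before hooks above; the resource class and route are hypothetical, and the three-argument hook signature assumes the older falcon API that these helpers target:

    import json

    import falcon

    class QueueResource(object):
        @falcon.before(extract_project_id)
        def on_get(self, req, resp, project_id):
            # project_id was injected into params by the hook.
            resp.body = json.dumps({'project': project_id})

    app = falcon.API()
    app.add_route('/v2/queues', QueueResource())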
-
-    :param req: request sent
-    :type req: falcon.request.Request
-    :param resp: response object to return
-    :type resp: falcon.response.Response
-    :param params: additional parameters passed to responders
-    :type params: dict
-    :rtype: None
-    :raises HTTPBadRequest: if the request has a body and its
-        "Content-Type" header is "application/x-www-form-urlencoded"
-    """
-    if req.content_length is None:
-        return
-    if req.content_type and (req.content_type.lower() ==
-                             'application/x-www-form-urlencoded'):
-        title = _(u'Invalid Content-Type')
-        description = _(u'Endpoint does not accept '
-                        u'`application/x-www-form-urlencoded` content; '
-                        u'the currently supported media type is '
-                        u'`application/json`; specify the proper client-side '
-                        u'media type with the "Content-Type" header.')
-        raise falcon.HTTPBadRequest(title, description)
-
-
-def inject_context(req, resp, params):
-    """Inject a request context into the request environment.
-
-    :param req: request sent
-    :type req: falcon.request.Request
-    :param resp: response object
-    :type resp: falcon.response.Response
-    :param params: additional parameters passed to responders
-    :type params: dict
-    :rtype: None
-    """
-    client_id = req.get_header('Client-ID')
-    project_id = params.get('project_id')
-    request_id = req.headers.get('X-Openstack-Request-ID')
-    auth_token = req.headers.get('X-AUTH-TOKEN')
-    user = req.headers.get('X-USER-ID')
-    tenant = req.headers.get('X-TENANT-ID')
-
-    roles = req.headers.get('X-ROLES')
-    roles = roles.split(',') if roles else []
-
-    ctxt = context.RequestContext(project_id=project_id,
-                                  client_id=client_id,
-                                  request_id=request_id,
-                                  auth_token=auth_token,
-                                  user=user,
-                                  tenant=tenant,
-                                  roles=roles)
-    req.env['zaqar.context'] = ctxt
diff --git a/zaqar/common/urls.py b/zaqar/common/urls.py
deleted file mode 100644
index afe7c08a..00000000
--- a/zaqar/common/urls.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-import hashlib
-import hmac
-
-from oslo_utils import timeutils
-import six
-
-_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
-
-
-def create_signed_url(key, paths, project=None, expires=None, methods=None):
-    """Creates a signed URL for the specified paths.
-
-    This function creates a pre-signed URL for `paths` using the
-    specified options or the defaults. The signature is the hex
-    digest of the HMAC created using `key`.
-
-    :param key: A string to use as a `key` for the hmac generation.
-    :param paths: A list of strings representing URL paths.
-    :param project: (Default None) The ID of the project this URL belongs to.
-    :param methods: (Default ['GET']) A list of methods that will be
-        supported by the generated URL.
-    :param expires: (Default time() + 86400) The expiration date for
-        the generated URL.
- """ - - methods = methods or ['GET'] - - if key is None: - raise ValueError('The `key` can\'t be None') - - if not isinstance(paths, list) or not paths: - raise ValueError('`paths` must be a non-empty list') - - if not isinstance(methods, list): - raise ValueError('`methods` should be a list') - - # NOTE(flaper87): The default expiration time is 1day - # Evaluate whether this should be configurable. We may - # also want to have a "maximum" expiration time. Food - # for thoughts. - if expires is not None: - # NOTE(flaper87): Verify if the format is correct - # and normalize the value to UTC. - check_expires = None - try: - check_expires = int(expires) - except ValueError: - pass - if check_expires: - raise ValueError('`expires` should be date format, ' - 'for example 2016-01-01T00:00:00, ' - 'not integer value: %s' % check_expires) - parsed = timeutils.parse_isotime(expires) - expires = timeutils.normalize_time(parsed) - else: - delta = datetime.timedelta(days=1) - expires = timeutils.utcnow() + delta - - if expires <= timeutils.utcnow(): - raise ValueError('`expires` is lower than the current time') - - methods = sorted(methods) - paths = sorted(paths) - expires_str = expires.strftime(_DATE_FORMAT) - hmac_body = six.b(r'%(paths)s\n%(methods)s\n%(project)s\n%(expires)s' % - {'paths': ','.join(paths), 'methods': ','.join(methods), - 'project': project, 'expires': expires_str}) - - if not isinstance(key, six.binary_type): - key = six.binary_type(key.encode('utf-8')) - - return {'paths': paths, - 'methods': methods, - 'project': project, - 'expires': expires_str, - 'signature': hmac.new(key, hmac_body, hashlib.sha256).hexdigest()} - - -def verify_signed_headers_data(key, paths, project, - signature, methods, expires): - """Verify that `signature` matches for the given values - - :param key: A string to use as a `key` for the hmac generation. - :param paths: A list of strings representing URL paths. - :param project: The ID of the project this URL belongs to. - :param signature: The pre-generated signature - :param methods: A list of methods that will be - supported by the generated URL. - :params expires: The expiration date for - the generated URL. - """ - - generated = create_signed_url(key, paths, project=project, - methods=methods, expires=expires) - - return signature == generated['signature'] diff --git a/zaqar/common/utils.py b/zaqar/common/utils.py deleted file mode 100644 index 8c605452..00000000 --- a/zaqar/common/utils.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""utils: general-purpose utilities.""" - -from oslo_config import cfg -import six - - -def fields(d, names, pred=lambda x: True, - key_transform=lambda x: x, value_transform=lambda x: x): - """Returns the entries in this dictionary with keys appearing in names. - - :type d: dict - :type names: [a] - :param pred: a filter that is applied to the values of the dictionary. 
- :type pred: (a -> bool) - :param key_transform: a transform to apply to the key before returning it - :type key_transform: a -> a - :param value_transform: a transform to apply to the value before - returning it - :type value_transform: a -> a - :rtype: dict - """ - - return dict((key_transform(k), value_transform(v)) - for k, v in d.items() - if k in names and pred(v)) - - -_pytype_to_cfgtype = { - six.text_type: cfg.StrOpt, - str: cfg.StrOpt, - int: cfg.IntOpt, - bool: cfg.BoolOpt, - float: cfg.FloatOpt, - list: cfg.ListOpt, - dict: cfg.DictOpt -} - - -def dict_to_conf(options): - """Converts a python dictionary to a list of oslo_config.cfg.Opt - - :param options: The python dictionary to convert - :type options: dict - :returns: a list of options compatible with oslo_config - :rtype: [oslo_config.cfg.Opt] - """ - - opts = [] - - for k, v in options.items(): - opt_type = _pytype_to_cfgtype[type(v)] - opts.append(opt_type(name=k, default=v)) - - return opts diff --git a/zaqar/context.py b/zaqar/context.py deleted file mode 100644 index 339b091e..00000000 --- a/zaqar/context.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""RequestContext: context for requests that persist through all of zaqar.""" - -from oslo_context import context - - -class RequestContext(context.RequestContext): - - def __init__(self, project_id=None, client_id=None, overwrite=True, - auth_token=None, user=None, tenant=None, domain=None, - user_domain=None, project_domain=None, is_admin=False, - read_only=False, show_deleted=False, request_id=None, - instance_uuid=None, roles=None, **kwargs): - super(RequestContext, self).__init__(auth_token=auth_token, - user=user, - tenant=tenant, - domain=domain, - user_domain=user_domain, - project_domain=project_domain, - is_admin=is_admin, - read_only=read_only, - show_deleted=False, - request_id=request_id, - roles=roles) - self.project_id = project_id - self.client_id = client_id - if overwrite or not hasattr(context._request_store, 'context'): - self.update_store() - - def update_store(self): - context._request_store.context = self - - def to_dict(self): - ctx = super(RequestContext, self).to_dict() - ctx.update({ - 'project_id': self.project_id, - 'client_id': self.client_id - }) - return ctx diff --git a/zaqar/hacking/__init__.py b/zaqar/hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/hacking/checks.py b/zaqar/hacking/checks.py deleted file mode 100644 index b8007f26..00000000 --- a/zaqar/hacking/checks.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - - -_all_log_levels = {'critical', 'error', 'exception', 'info', - 'warning', 'debug'} - -# Since _Lx() have been removed, we just need to check _() -_all_hints = {'_'} - -_log_translation_hint = re.compile( - r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % { - 'levels': '|'.join(_all_log_levels), - 'hints': '|'.join(_all_hints), - }) - - -def no_translate_logs(logical_line): - """N537 - Don't translate logs. - - Check for 'LOG.*(_(' - - Translators don't provide translations for log messages, and operators - asked not to translate them. - - * This check assumes that 'LOG' is a logger. - - :param logical_line: The logical line to check. - :returns: None if the logical line passes the check, otherwise a tuple - is yielded that contains the offending index in logical line and a - message describe the check validation failure. - """ - if _log_translation_hint.match(logical_line): - yield (0, "N537: Log messages should not be translated!") - - -def factory(register): - register(no_translate_logs) diff --git a/zaqar/i18n.py b/zaqar/i18n.py deleted file mode 100644 index a03e095b..00000000 --- a/zaqar/i18n.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See http://docs.openstack.org/developer/oslo.i18n/usage.html . - -""" - -import oslo_i18n as i18n - -_translators = i18n.TranslatorFactory(domain='zaqar') - -# The primary translation function using the well-known name "_" -_ = _translators.primary diff --git a/zaqar/locale/es/LC_MESSAGES/zaqar.po b/zaqar/locale/es/LC_MESSAGES/zaqar.po deleted file mode 100644 index e5c601e7..00000000 --- a/zaqar/locale/es/LC_MESSAGES/zaqar.po +++ /dev/null @@ -1,416 +0,0 @@ -# Translations template for zaqar. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the zaqar project. -# -# Translators: -# Adriana Chisco Landazábal , 2015 -# Milton Mazzarri , 2014 -# Pablo Sanchez , 2015 -# Victoria Martínez de la Cruz , 2014 -# Andreas Jaeger , 2016. 
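The N537 check above is a generator that yields a finding only when the regex matches; a tiny self-check with illustrative log lines:

    assert list(no_translate_logs("LOG.error(_('boom'))"))      # flagged
    assert not list(no_translate_logs("LOG.info('all good')"))  # clean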
#zanata -msgid "" -msgstr "" -"Project-Id-Version: zaqar 2.0.1.dev22\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-05-03 22:29+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-27 07:38+0000\n" -"Last-Translator: openstackjenkins \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Spanish\n" - -msgid "" -"\n" -"X-PROJECT-ID cannot be an empty string. Specify the right header X-PROJECT-" -"ID\n" -"and retry." -msgstr "" -"\n" -"X-PROJECT-ID no puede ser una cadena vacía. Especifique el encabezado X-" -"PROJECT-ID\n" -"correcto y vuelva a intentar." - -#, python-format -msgid "" -"%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " -"\"%(queue)s\" under project %(project)s" -msgstr "" -"%(attempts)d intento(s) requerido para publicar %(num_messages)d mensajes a " -"la cola \"%(queue)s\" del proyecto %(project)s" - -msgid "A claim was specified, but the message is not currently claimed." -msgstr "Se pudo especificar una solicitud pero no se ha solicitado mensaje." - -#, python-format -msgid "Cannot retrieve queue %s stats." -msgstr "No se puede recuperar estadísticas de cola %s." - -#, python-format -msgid "Cannot retrieve queue %s." -msgstr "No se puede recuperar cola %s." - -msgid "Claim could not be created." -msgstr "No se pudo crear solicitud." - -msgid "Claim could not be deleted." -msgstr "No se pudo eliminar solicitud." - -msgid "Claim could not be queried." -msgstr "No se pudo realizar la solicitud." - -msgid "Claim could not be updated." -msgstr "No se pudo actualizar solicitud." - -msgid "Doctype must be either a JSONObject or JSONArray" -msgstr "Doctype debe ser un JSONObject o un JSONObject " - -msgid "Document type not supported." -msgstr "Tipo de documento no soportado." - -msgid "" -"Either a replica set or a mongos is required to guarantee message delivery" -msgstr "" -"Se requiere un conjunto de réplica o mongos para garantizar que el mensaje " -"sea enviado" - -#, python-format -msgid "" -"Exceeded maximum retry duration for queue \"%(queue)s\" under project " -"%(project)s" -msgstr "" -"Se excedió la duración máxima de intentos para la cola \"%(queue)s\" del " -"proyecto %(project)s" - -#, python-format -msgid "" -"Failed to increment the message counter for queue %(name)s and project " -"%(project)s" -msgstr "" -"Fallo al incrementar el contador de mensajes para la cola %(name)s y " -"proyecto %(project)s" - -#, python-format -msgid "" -"First attempt failed while adding messages to queue \"%(queue)s\" under " -"project %(project)s" -msgstr "" -"Fallo el primer intento mientras agregaba mensajes a la cola \"%(queue)s\" " -"del proyecto %(project)s" - -msgid "Health status could not be read." -msgstr "No se pudo leer el estado de salud." - -#, python-format -msgid "" -"Hit maximum number of attempts (%(max)s) for queue \"%(queue)s\" under " -"project %(project)s" -msgstr "" -"Alcanzado el número máximo (%(max)s) de intentos para la cola \"%(queue)s\" " -"del proyecto %(project)s" - -msgid "Invalid API request" -msgstr "Solicitud API no válida" - -msgid "Invalid queue identification" -msgstr "Identificador de cola inválido" - -msgid "Invalid request body" -msgstr "Cuerpo de solicitud es inválido" - -msgid "Invalid request." 
-msgstr "Solicitud no válida" - -msgid "Invalid scheme in Redis URI" -msgstr "Esquema en Redis URI no válido" - -msgid "JSON contains integer that is too large." -msgstr "JSON contiene entero muy largo." - -msgid "Limit must be at least 1 and may not be greater than {0}." -msgstr "Límite debe ser al menos 1 y no mayor que {0}." - -msgid "Limit must be at least 1 and no greater than {0}." -msgstr "Límite debe ser al menos 1 y no mayor que {0}." - -msgid "Malformed Redis URI" -msgstr "URI de Redis incorrecta" - -msgid "Malformed hexadecimal UUID." -msgstr "Valor hexadecimal UUID mal formado." - -msgid "Message collection size is too large. Max size {0}" -msgstr "El tamaño de la colección de mensajes es muy largo. Tamaño máximo: {0}" - -msgid "Message could not be deleted." -msgstr "No se pudo eliminar mensaje." - -msgid "Message could not be retrieved." -msgstr "No se pudo recuperar mensaje." - -msgid "Messages could not be deleted." -msgstr "No se pudo eliminar mensajes." - -msgid "Messages could not be enqueued." -msgstr "Np se pudo poner mensajes en cola." - -msgid "Messages could not be listed." -msgstr "No se pudo listar mensajes." - -msgid "Messages could not be popped." -msgstr "Los mensajes no se pudieron dirigir." - -msgid "Metadata could not be updated." -msgstr "No se pudo actualizar metadatos." - -#, python-format -msgid "Method %s not found in any of the registered stages" -msgstr "El metodo %s no se ha encontrado en ninguna de las etapas registradas" - -msgid "Missing \"{name}\" field." -msgstr "Campo \"{name}\" no presente." - -msgid "Missing host name in Redis URI" -msgstr "Hace falta el nombre de host en Redis URI" - -#, python-format -msgid "Missing parameter %s in body." -msgstr "Falta parámetro %s en el cuerpo." - -msgid "Missing path in Redis URI" -msgstr "Hace falta la ruta en Redis URI" - -msgid "No messages could be enqueued." -msgstr "No hay mensajes para poner en cola." - -msgid "No messages to enqueu." -msgstr "No hay mensajes para colocar en la cola." - -msgid "No messages were found in the request body." -msgstr "No se encontraron mensajes en el cuerpo de solicitud." - -msgid "No subscription to create." -msgstr "No hay suscripción para crear." - -msgid "Options must be a dict." -msgstr "Las opciones deben ser un dict." - -msgid "" -"Pipeline to use for processing {0} operations. This pipeline will be " -"consumed before calling the storage driver's controller methods." -msgstr "" -"La segmentación a usar para las operaciones de procesamiento {0}. Esta " -"segmentación se empleará antes de llamar los métodos de almacenamiento del " -"controlador." - -msgid "Please try again in a few seconds." -msgstr "Por favor intente de nuevo en unos segundos." - -msgid "Pop value must be at least 1 and may not be greater than {0}." -msgstr "El valor pop debe ser al menos 1 y no debe ser mayor que {0}." - -msgid "Project ids may not be more than {0} characters long." -msgstr "Los ids de proyecto no deben ocupar más de {0} caracteres. " - -#, python-format -msgid "Queue %s could not be created." -msgstr "No se pudo crear cola %s." - -#, python-format -msgid "Queue %s could not be deleted." -msgstr "No se pudo eliminar cola %s." - -#, python-format -msgid "Queue %s created." -msgstr "Se ha creado cola %s." - -#, python-format -msgid "Queue %s does not exist." -msgstr "No existe cola %s." - -#, python-format -msgid "Queue %s removed." -msgstr "Se ha eliminado cola %s." - -msgid "Queue could not be created." -msgstr "No se pudo crear cola." - -msgid "Queue could not be deleted." 
-msgstr "No se pudo eliminar cola." - -msgid "Queue metadata could not be retrieved." -msgstr "No se pudo recuperar metadatos en cola." - -msgid "Queue metadata is too large. Max size: {0}" -msgstr "Los metadatos de la cola son muy largos. Tamaño máximo: {0}" - -msgid "Queue names may not be more than {0} characters long." -msgstr "Los nombres de colas no deben ocupar más de {0} caracteres." - -msgid "" -"Queue names may only contain ASCII letters, digits, underscores, and dashes." -msgstr "" -"Los nombres de colas solo pueden contener caracteres ASCII, números, guiones " -"bajos y guiones." - -msgid "Queue stats could not be read." -msgstr "No se pueden leer las estadísticas de cola." - -msgid "Queues could not be listed." -msgstr "No se pudo listar colas." - -msgid "Request body can not be empty" -msgstr "Cuerpo de la solicitud no puede estar vacío" - -msgid "Request body could not be parsed." -msgstr "Cuerpo de la solicitud no pudo ser analizado." - -msgid "Request body could not be read." -msgstr "El cuerpo de la petición no pudo ser leído." - -msgid "Service temporarily unavailable" -msgstr "Servicio no disponible temporalmente" - -#, python-format -msgid "Serving on host %(bind)s:%(port)s" -msgstr "Sirviendo en la máquina %(bind)s:%(port)s" - -#, python-format -msgid "Stage %(stage)s could not be imported: %(ex)s" -msgstr "No se puede importar: %(ex)s la etapa %(stage)s" - -#, python-format -msgid "Stage %(stage)s does not implement %(method)s" -msgstr "La etapa %(stage)s no implementa %(method)s" - -msgid "Subscription could not be created." -msgstr "No se pudo crear suscripción." - -msgid "Subscription could not be deleted." -msgstr "No se puede eliminar suscripción." - -msgid "Subscription could not be retrieved." -msgstr "No se puede recuperar suscripción." - -msgid "Subscriptions could not be listed." -msgstr "No se pueden listar suscripciones." - -msgid "TTL must be an integer." -msgstr "TTL debe ser un entero." - -msgid "Terminating" -msgstr "Terminando" - -msgid "" -"The Redis URI specifies multiple sentinel hosts, but is missing the \"master" -"\" query string parameter. Please set \"master\" to the name of the Redis " -"master server as specified in the sentinel configuration file." -msgstr "" -"El Redis URI especifica múltiples hosts de sentinel, pero hace falta el " -"parámetro de secuencia de consulta \"master\". Por favor configure \"master" -"\" en el nombre del servidor maestro Redis como se especifica en el fichero " -"de configuración sentinel." - -msgid "The Redis configuration URI contains an invalid port" -msgstr "La configuración URI de Redis contiene un puerto no válido" - -msgid "The Redis configuration URI does not define any sentinel hosts" -msgstr "La configuración URI de Redis no define hosts de sentinel alguno" - -#, python-format -msgid "The Redis driver requires redis-server>=2.6, %s found" -msgstr "El controlador de Redis requiere edis-server>=2.6, %s encontrado" - -msgid "" -"The TTL for a claim may not exceed {0} seconds, and must be at least {1} " -"seconds long." -msgstr "" -"El TTL para una solicitud no debe exceder {0} segundos y debe durar al menos " -"{1} segundos." - -msgid "" -"The TTL for a message may not exceed {0} seconds, and must be at least {1} " -"seconds long." -msgstr "" -"El TTL para un mensaje no debe exceder {0} segundos, y debe ser al menos de " -"{1} segundos de largo." - -msgid "The format of the submitted queue name or project id is not valid." 
-msgstr "" -"El formato del nombre de la cola propuesto o el id del proyecto no es válido." - -msgid "" -"The grace for a claim may not exceed {0} seconds, and must be at least {1} " -"seconds long." -msgstr "" -"El periodo de gracia para una solicitud no debe exceder {0} segundos y debe " -"durar al menos {1} segundos." - -msgid "The header X-PROJECT-ID was missing" -msgstr "Faltaba la cabecera X-PROJECT-ID" - -#, python-format -msgid "The mongodb driver requires mongodb>=2.2, %s found" -msgstr "El driver mongodb requiere mongodb>=2.2, %s encontrado" - -msgid "" -"The request should have either \"ids\" or \"pop\" parameter in the request, " -"to be able to delete." -msgstr "" -"La solicitud debe contener parámetro \"ids\" o \"pop\" para que sea posible " -"la eliminación." - -msgid "The specified claim does not exist or has expired." -msgstr "La solicitud especificada no existe o ha expirado." - -msgid "The subscriber type of subscription must be supported in the list {0}." -msgstr "" -"El tipo de suscriptor en la suscripción debe ser soportado en la lista {0}." - -msgid "The value of the \"{name}\" field must be a {vtype}." -msgstr "El valor del campo \"{name}\" debe ser {vtype}." - -msgid "This message is claimed; it cannot be deleted without a valid claim ID." -msgstr "" -"El mensaje está solicitado; no se puede borrar sin un identificador de " -"solicitud válido." - -msgid "This pool is used by flavors {flavor}; It cannot be deleted." -msgstr "Los tipos {flavor} usan este pool; no puede eliminarse." - -msgid "Unable to create" -msgstr "No se puede crear" - -msgid "Unable to delete" -msgstr "No se puede eliminar" - -msgid "Unable to update subscription" -msgstr "No se puede actualizar suscripción" - -msgid "Unexpected error." -msgstr "Error inesperado." - -msgid "" -"Using a write concern other than `majority` or > 2 makes the service " -"unreliable. Please use a different write concern or set `unreliable` to True " -"in the config file." -msgstr "" -"Emplear un asunto escrito diferente a `majority` o > 2 hace que el servicio " -"sea desconfiable. Por favor emplee un asunto diferente o configure " -"`unreliable` como True en el fichero de configuración." - -msgid "ids parameter should have at least 1 and not greater than {0} values." -msgstr "" -"el parámetro de identificador debe tener al menos entre 1 y {0} valores. " - -msgid "pop and id params cannot be present together in the delete request." -msgstr "" -"Los parámetros pop e identificador no pueden estar presentes en la solicitud " -"de eliminación." - -msgid "{0} is not a valid action" -msgstr "{0} no es una acción valida" diff --git a/zaqar/locale/fr/LC_MESSAGES/zaqar-log-error.po b/zaqar/locale/fr/LC_MESSAGES/zaqar-log-error.po deleted file mode 100644 index bba5ac17..00000000 --- a/zaqar/locale/fr/LC_MESSAGES/zaqar-log-error.po +++ /dev/null @@ -1,43 +0,0 @@ -# Translations template for zaqar. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the zaqar project. -# -# Translators: -# Maxime COQUEREL , 2014-2015 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: zaqar 2.0.1.dev22\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-05-03 22:29+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-11-27 04:24+0000\n" -"Last-Translator: Maxime Coquerel \n" -"Language-Team: French (http://www.transifex.com/openstack/zaqar/language/" -"fr/)\n" -"Language: fr\n" -"Generated-By: Babel 2.0\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"X-Generator: Zanata 3.7.3\n" - -#, python-format -msgid "Invalid paths: %s" -msgstr "Chemins non valides: %s" - -msgid "The `key` can't be None" -msgstr "La clé ne peut pas être nulle" - -#, python-format -msgid "Unknown keys: %s" -msgstr "Clés inconnues: %s" - -msgid "`expires` is lower than the current time" -msgstr "` expires` est inférieur au temps courant" - -msgid "`methods` should be a list" -msgstr "`methods`doit être une liste" - -msgid "`paths` must be a non-empty list" -msgstr "`paths` ne doit pas etre une liste vide" diff --git a/zaqar/notification/__init__.py b/zaqar/notification/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/notification/notifier.py b/zaqar/notification/notifier.py deleted file mode 100644 index 485e91ec..00000000 --- a/zaqar/notification/notifier.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) 2015 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum -from stevedore import driver - -import futurist -from oslo_log import log as logging -from six.moves import urllib_parse - -from zaqar.common import auth -from zaqar.common import urls -from zaqar.storage import pooling - -LOG = logging.getLogger(__name__) - - -@enum.unique -class MessageType(enum.IntEnum): - """Enum of message type.""" - SubscriptionConfirmation = 1 - UnsubscribeConfirmation = 2 - Notification = 3 - - -class NotifierDriver(object): - """Notifier which is responsible for sending messages to subscribers. - - """ - - def __init__(self, *args, **kwargs): - self.subscription_controller = kwargs.get('subscription_controller') - max_workers = kwargs.get('max_notifier_workers', 10) - self.executor = futurist.ThreadPoolExecutor(max_workers=max_workers) - self.require_confirmation = kwargs.get('require_confirmation', False) - - def post(self, queue_name, messages, client_uuid, project=None): - """Send messages to the subscribers.""" - if self.subscription_controller: - if not isinstance(self.subscription_controller, - pooling.SubscriptionController): - marker = None - while True: - subscribers = self.subscription_controller.list( - queue_name, project, marker=marker) - for sub in next(subscribers): - LOG.debug("Notifying subscriber %r", (sub,)) - s_type = urllib_parse.urlparse( - sub['subscriber']).scheme - # If the subscriber doesn't contain 'confirmed', it - # means that this kind of subscriber was created before - # the confirm feature be introduced into Zaqar. 
We - # should allow them be subscribed. - if (self.require_confirmation and - not sub.get('confirmed', True)): - LOG.info('The subscriber %s is not ' - 'confirmed.', sub['subscriber']) - continue - for msg in messages: - msg['Message_Type'] = MessageType.Notification.name - self._execute(s_type, sub, messages) - marker = next(subscribers) - if not marker: - break - else: - LOG.error('Failed to get subscription controller.') - - def send_confirm_notification(self, queue, subscription, conf, - project=None, expires=None, - api_version=None, is_unsubscribed=False): - # NOTE(flwang): If the confirmation feature isn't enabled, just do - # nothing. Here we're getting the require_confirmation from conf - # object instead of using self.require_confirmation, because the - # variable from self object really depends on the kwargs when - # initializing the NotifierDriver object. See bug 1655812 for more - # information. - if not conf.notification.require_confirmation: - return - - key = conf.signed_url.secret_key - if not key: - LOG.error("Can't send confirm notification due to the value of" - " secret_key option is None") - return - url = "/%s/queues/%s/subscriptions/%s/confirm" % (api_version, queue, - subscription['id']) - pre_url = urls.create_signed_url(key, [url], project=project, - expires=expires, methods=['PUT']) - message = None - if is_unsubscribed: - message_type = MessageType.UnsubscribeConfirmation.name - message = ('You have unsubscribed successfully to the queue: %s, ' - 'you can resubscribe it by using confirmed=True.' - % queue) - else: - message_type = MessageType.SubscriptionConfirmation.name - message = 'You have chosen to subscribe to the queue: %s' % queue - - messages = {} - endpoint_dict = auth.get_public_endpoint() - if endpoint_dict: - wsgi_endpoint = endpoint_dict.get('zaqar') - if wsgi_endpoint: - wsgi_subscribe_url = urllib_parse.urljoin( - wsgi_endpoint, url) - messages['WSGISubscribeURL'] = wsgi_subscribe_url - websocket_endpoint = endpoint_dict.get('zaqar-websocket') - if websocket_endpoint: - websocket_subscribe_url = urllib_parse.urljoin( - websocket_endpoint, url) - messages['WebSocketSubscribeURL'] = websocket_subscribe_url - messages.update({'Message_Type': message_type, - 'Message': message, - 'URL-Signature': pre_url['signature'], - 'URL-Methods': pre_url['methods'][0], - 'URL-Paths': pre_url['paths'][0], - 'X-Project-ID': pre_url['project'], - 'URL-Expires': pre_url['expires'], - 'SubscribeBody': {'confirmed': True}, - 'UnsubscribeBody': {'confirmed': False}}) - s_type = urllib_parse.urlparse(subscription['subscriber']).scheme - LOG.info('Begin to send %(type)s confirm/unsubscribe notification.' 
- ' The request body is %(messages)s', - {'type': s_type, 'messages': messages}) - - self._execute(s_type, subscription, [messages], conf) - - def _execute(self, s_type, subscription, messages, conf=None): - if self.subscription_controller: - data_driver = self.subscription_controller.driver - conf = data_driver.conf - else: - conf = conf - mgr = driver.DriverManager('zaqar.notification.tasks', - s_type, - invoke_on_load=True) - self.executor.submit(mgr.driver.execute, subscription, messages, - conf=conf) diff --git a/zaqar/notification/tasks/__init__.py b/zaqar/notification/tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/notification/tasks/mailto.py b/zaqar/notification/tasks/mailto.py deleted file mode 100644 index 1691b6b7..00000000 --- a/zaqar/notification/tasks/mailto.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2015 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from email.mime import text -import json -from six.moves import urllib_parse -import subprocess - -from oslo_log import log as logging - -from zaqar.i18n import _ -from zaqar.notification.notifier import MessageType - -LOG = logging.getLogger(__name__) - - -class MailtoTask(object): - - def _make_confirm_string(self, conf_n, message, queue_name): - confirm_url = conf_n.external_confirmation_url - if confirm_url is None: - msg = _("Can't make confirmation email body, need a valid " - "confirm url.") - LOG.error(msg) - raise Exception(msg) - param_string_signature = '?Signature=' + message.get('URL-Signature', - '') - param_string_methods = '&Methods=' + message.get('URL-Methods', '') - param_string_paths = '&Paths=' + message.get('URL-Paths', '') - param_string_project = '&Project=' + message.get('X-Project-ID', '') - param_string_expires = '&Expires=' + message.get('URL-Expires', '') - param_string_confirm_url = '&Url=' + message.get('WSGISubscribeURL', - '') - param_string_queue = '&Queue=' + queue_name - confirm_url_string = (confirm_url + param_string_signature + - param_string_methods + param_string_paths + - param_string_project + param_string_expires + - param_string_confirm_url + param_string_queue) - return confirm_url_string - - def _make_confirmation_email(self, body, subscription, message, conf_n): - queue_name = subscription['source'] - confirm_url = self._make_confirm_string(conf_n, message, - queue_name) - email_body = "" - if body is not None: - email_body = body.format(queue_name, message['X-Project-ID'], - confirm_url) - return text.MIMEText(email_body) - - def execute(self, subscription, messages, **kwargs): - subscriber = urllib_parse.urlparse(subscription['subscriber']) - params = urllib_parse.parse_qs(subscriber.query) - params = dict((k.lower(), v) for k, v in params.items()) - conf_n = kwargs.get('conf').notification - try: - for message in messages: - p = subprocess.Popen(conf_n.smtp_command.split(' '), - stdin=subprocess.PIPE) - # Send confirmation email to subscriber. 
- if (message.get('Message_Type') == - MessageType.SubscriptionConfirmation.name): - content = conf_n.subscription_confirmation_email_template - msg = self._make_confirmation_email(content['body'], - subscription, - message, conf_n) - msg["to"] = subscriber.path - msg["from"] = content['sender'] - msg["subject"] = content['topic'] - elif (message.get('Message_Type') == - MessageType.UnsubscribeConfirmation.name): - content = conf_n.unsubscribe_confirmation_email_template - msg = self._make_confirmation_email(content['body'], - subscription, - message, conf_n) - msg["to"] = subscriber.path - msg["from"] = content['sender'] - msg["subject"] = content['topic'] - else: - # NOTE(Eva-i): Unfortunately this will add 'queue_name' key - # to our original messages(dicts) which will be later - # consumed in the storage controller. It seems safe though. - message['queue_name'] = subscription['source'] - msg = text.MIMEText(json.dumps(message)) - msg["to"] = subscriber.path - msg["from"] = subscription['options'].get('from', '') - subject_opt = subscription['options'].get('subject', '') - msg["subject"] = params.get('subject', subject_opt) - p.communicate(msg.as_string()) - LOG.debug("Send mail successfully: %s", msg.as_string()) - except OSError as err: - LOG.exception('Failed to create process for sendmail, ' - 'because %s.', str(err)) - except Exception as exc: - LOG.exception('Failed to send email because %s.', str(exc)) - - def register(self, subscriber, options, ttl, project_id, request_data): - pass diff --git a/zaqar/notification/tasks/trust.py b/zaqar/notification/tasks/trust.py deleted file mode 100644 index 7989cfe2..00000000 --- a/zaqar/notification/tasks/trust.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import datetime - -from oslo_utils import timeutils - -from zaqar.common import auth -from zaqar.notification.tasks import webhook - - -class TrustTask(webhook.WebhookTask): - """A webhook using trust authentication. - - This webhook will use the trust stored in the subscription to ask for a - token, which will then be passed to the notified service. 
- """ - - def execute(self, subscription, messages, **kwargs): - subscription = copy.deepcopy(subscription) - subscriber = subscription['subscriber'] - - trust_id = subscription['options']['trust_id'] - token = auth.get_trusted_token(trust_id) - - subscription['subscriber'] = subscriber[6:] - headers = {'X-Auth-Token': token, - 'Content-Type': 'application/json'} - super(TrustTask, self).execute(subscription, messages, headers, - **kwargs) - - def register(self, subscriber, options, ttl, project_id, request_data): - if 'trust_id' not in options: - # We have a trust subscriber without a trust ID, - # create it - trustor_user_id = request_data.get('X-USER-ID') - roles = request_data.get('X-ROLES', '') - if roles: - roles = roles.split(',') - else: - roles = [] - auth_plugin = request_data.get('keystone.token_auth') - expires_at = None - if ttl: - expires_at = timeutils.utcnow() + datetime.timedelta( - seconds=ttl) - - trust_id = auth.create_trust_id( - auth_plugin, trustor_user_id, project_id, roles, - expires_at) - options['trust_id'] = trust_id diff --git a/zaqar/notification/tasks/webhook.py b/zaqar/notification/tasks/webhook.py deleted file mode 100644 index 0fc9b2e0..00000000 --- a/zaqar/notification/tasks/webhook.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2015 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -from oslo_log import log as logging -import requests - -LOG = logging.getLogger(__name__) - - -class WebhookTask(object): - - def execute(self, subscription, messages, headers=None, **kwargs): - if headers is None: - headers = {'Content-Type': 'application/json'} - headers.update(subscription['options'].get('post_headers', {})) - try: - for msg in messages: - # NOTE(Eva-i): Unfortunately this will add 'queue_name' key to - # our original messages(dicts) which will be later consumed in - # the storage controller. It seems safe though. - msg['queue_name'] = subscription['source'] - if 'post_data' in subscription['options']: - data = subscription['options']['post_data'] - data = data.replace('"$zaqar_message$"', json.dumps(msg)) - else: - data = json.dumps(msg) - requests.post(subscription['subscriber'], - data=data, - headers=headers) - except Exception as e: - LOG.exception('webhook task got exception: %s.', str(e)) - - def register(self, subscriber, options, ttl, project_id, request_data): - pass diff --git a/zaqar/storage/__init__.py b/zaqar/storage/__init__.py deleted file mode 100644 index 795647b1..00000000 --- a/zaqar/storage/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Zaqar Storage Drivers""" - -from zaqar.storage import base -from zaqar.storage import errors # NOQA - -# Hoist classes into package namespace -Capabilities = base.Capabilities -ControlDriverBase = base.ControlDriverBase -DataDriverBase = base.DataDriverBase -CatalogueBase = base.CatalogueBase -Claim = base.Claim -Message = base.Message -Queue = base.Queue -Subscription = base.Subscription -PoolsBase = base.PoolsBase -FlavorsBase = base.FlavorsBase - -DEFAULT_QUEUES_PER_PAGE = base.DEFAULT_QUEUES_PER_PAGE -DEFAULT_MESSAGES_PER_PAGE = base.DEFAULT_MESSAGES_PER_PAGE -DEFAULT_POOLS_PER_PAGE = base.DEFAULT_POOLS_PER_PAGE -DEFAULT_SUBSCRIPTIONS_PER_PAGE = base.DEFAULT_SUBSCRIPTIONS_PER_PAGE - -DEFAULT_MESSAGES_PER_CLAIM = base.DEFAULT_MESSAGES_PER_CLAIM diff --git a/zaqar/storage/base.py b/zaqar/storage/base.py deleted file mode 100644 index 805984d4..00000000 --- a/zaqar/storage/base.py +++ /dev/null @@ -1,1086 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# Copyright 2014 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implements the DriverBase abstract class for Zaqar storage drivers.""" - -import abc -import functools -import time - -import enum -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils -import six - -from zaqar.common import decorators -from zaqar.storage import errors -from zaqar.storage import utils - - -DEFAULT_QUEUES_PER_PAGE = 10 -DEFAULT_MESSAGES_PER_PAGE = 10 -DEFAULT_POOLS_PER_PAGE = 10 -DEFAULT_SUBSCRIPTIONS_PER_PAGE = 10 - -DEFAULT_MESSAGES_PER_CLAIM = 10 - -LOG = logging.getLogger(__name__) - - -@enum.unique -class Capabilities(enum.IntEnum): - """Enum of storage capabilities.""" - FIFO = 1 - CLAIMS = 2 - DURABILITY = 3 - AOD = 4 # At least once delivery - HIGH_THROUGHPUT = 5 - - -@six.add_metaclass(abc.ABCMeta) -class DriverBase(object): - """Base class for both data and control plane drivers - - :param conf: Configuration containing options for this driver. - :type conf: `oslo_config.ConfigOpts` - :param cache: Cache instance to use for reducing latency - for certain lookups. - :type cache: `dogpile.cache.region.CacheRegion` - """ - _DRIVER_OPTIONS = [] - - def __init__(self, conf, cache): - self.conf = conf - self.cache = cache - self._register_opts() - - def _register_opts(self): - for group, options in self._DRIVER_OPTIONS: - for opt in options: - try: - self.conf.register_opt(opt, group=group) - except cfg.DuplicateOptError: - pass - - -@six.add_metaclass(abc.ABCMeta) -class DataDriverBase(DriverBase): - """Interface definition for storage drivers. - - Data plane storage drivers are responsible for implementing the - core functionality of the system. - - Connection information and driver-specific options are - loaded from the config file or the pool catalog. - - :param conf: Configuration containing options for this driver. 
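DriverBase._register_opts above iterates a list of (group, options) pairs and tolerates duplicate registration. A hedged sketch of how a concrete driver might declare and register options under that contract; the group and option names here are invented for illustration, not real Zaqar options:

    from oslo_config import cfg

    # Hypothetical option group and options, for illustration only.
    _EXAMPLE_GROUP = 'drivers:message_store:example'
    _EXAMPLE_OPTIONS = [
        cfg.StrOpt('uri', help='Connection URI for the example store.'),
        cfg.IntOpt('timeout', default=5, help='Request timeout in seconds.'),
    ]

    conf = cfg.ConfigOpts()
    # The same idempotent loop DriverBase._register_opts runs over
    # _DRIVER_OPTIONS: a conflicting re-registration raises
    # DuplicateOptError, which is deliberately swallowed.
    for group, options in [(_EXAMPLE_GROUP, _EXAMPLE_OPTIONS)]:
        for opt in options:
            try:
                conf.register_opt(opt, group=group)
            except cfg.DuplicateOptError:
                pass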
- :type conf: `oslo_config.ConfigOpts` - :param cache: Cache instance to use for reducing latency - for certain lookups. - :type cache: `dogpile.cache.region.CacheRegion` - """ - - BASE_CAPABILITIES = [] - - def __init__(self, conf, cache, control_driver): - super(DataDriverBase, self).__init__(conf, cache) - # creating ControlDriver instance for accessing QueueController's - # data from DataDriver - self.control_driver = control_driver - - @abc.abstractmethod - def is_alive(self): - """Check whether the storage is ready.""" - raise NotImplementedError - - @abc.abstractproperty - def capabilities(self): - """Returns storage's capabilities.""" - return self.BASE_CAPABILITIES - - def health(self): - """Return the health status of service.""" - overall_health = {} - # NOTE(flwang): KPI extracted from different storage backends, - # _health() will be implemented by different storage drivers. - backend_health = self._health() - if backend_health: - overall_health.update(backend_health) - - return overall_health - - @abc.abstractmethod - def _health(self): - """Return the health status based on different backends.""" - raise NotImplementedError - - @abc.abstractmethod - def close(self): - """Close connections to the backend.""" - raise NotImplementedError - - def _get_operation_status(self): - op_status = {} - status_template = lambda s, t, r: {'succeeded': s, - 'seconds': t, - 'ref': r} - project = uuidutils.generate_uuid() - queue = uuidutils.generate_uuid() - client = uuidutils.generate_uuid() - msg_template = lambda s: {'ttl': 600, 'body': {'event': 'p_%s' % s}} - messages = [msg_template(i) for i in range(100)] - claim_metadata = {'ttl': 60, 'grace': 300} - - # NOTE (flwang): Using time.time() instead of timeit since timeit will - # make the method calling be complicated. - def _handle_status(operation_type, callable_operation): - succeeded = True - ref = None - result = None - try: - start = time.time() - result = callable_operation() - except Exception as e: - ref = uuidutils.generate_uuid() - LOG.exception(e, extra={'instance_uuid': ref}) - succeeded = False - status = status_template(succeeded, time.time() - start, ref) - op_status[operation_type] = status - return succeeded, result - - # create queue - func = functools.partial(self.queue_controller.create, - queue, project=project) - succeeded, _ = _handle_status('create_queue', func) - - # post messages - if succeeded: - func = functools.partial(self.message_controller.post, - queue, messages, client, project=project) - _, msg_ids = _handle_status('post_messages', func) - - # claim messages - if msg_ids: - func = functools.partial(self.claim_controller.create, - queue, claim_metadata, - project=project) - _, (claim_id, claim_msgs) = _handle_status('claim_messages', - func) - - # list messages - func = functools.partial(self.message_controller.list, - queue, project, echo=True, - client_uuid=client, - include_claimed=True) - _handle_status('list_messages', func) - - # delete messages - if claim_id and claim_msgs: - for message in claim_msgs: - func = functools.partial(self. 
- message_controller.delete, - queue, message['id'], - project, claim=claim_id) - succeeded, _ = _handle_status('delete_messages', func) - if not succeeded: - break - # delete claim - func = functools.partial(self.claim_controller.delete, - queue, claim_id, project) - _handle_status('delete_claim', func) - - # delete queue - func = functools.partial(self.message_controller.bulk_delete, - queue, msg_ids, project=project) - _handle_status('bulk_delete_messages', func) - func = functools.partial(self.queue_controller.delete, - queue, project=project) - _handle_status('delete_queue', func) - return op_status - - def gc(self): - """Perform manual garbage collection of claims and messages. - - This method can be overridden in order to provide a trigger - that can be called by so-called "garbage collection" scripts - that are required by some drivers. - - By default, this method does nothing. - """ - pass - - @decorators.lazy_property(write=False) - def queue_controller(self): - return self.control_driver.queue_controller - - @abc.abstractproperty - def message_controller(self): - """Returns the driver's message controller.""" - raise NotImplementedError - - @abc.abstractproperty - def claim_controller(self): - """Returns the driver's claim controller.""" - raise NotImplementedError - - @abc.abstractproperty - def subscription_controller(self): - """Returns the driver's subscription controller.""" - raise NotImplementedError - - -@six.add_metaclass(abc.ABCMeta) -class ControlDriverBase(DriverBase): - """Interface definition for control plane storage drivers. - - Storage drivers that work at the control plane layer allow one to - modify aspects of the functionality of the system. This is ideal - for administrative purposes. - - Allows access to the pool registry through a catalogue and a - pool controller. - - :param conf: Configuration containing options for this driver. - :type conf: `oslo_config.ConfigOpts` - :param cache: Cache instance to use for reducing latency - for certain lookups. - :type cache: `dogpile.cache.region.CacheRegion` - """ - - @abc.abstractproperty - def catalogue_controller(self): - """Returns the driver's catalogue controller.""" - raise NotImplementedError - - @abc.abstractproperty - def pools_controller(self): - """Returns storage's pool management controller.""" - raise NotImplementedError - - @abc.abstractproperty - def flavors_controller(self): - """Returns storage's flavor management controller.""" - raise NotImplementedError - - @abc.abstractproperty - def queue_controller(self): - """Returns the driver's queue controller.""" - raise NotImplementedError - - @abc.abstractmethod - def close(self): - """Close connections to the backend.""" - raise NotImplementedError - - -class ControllerBase(object): - """Top-level class for controllers. - - :param driver: Instance of the driver - instantiating this controller. - """ - - def __init__(self, driver): - self.driver = driver - - -@six.add_metaclass(abc.ABCMeta) -class Queue(ControllerBase): - """This class is responsible for managing queues. - - Queue operations include CRUD, monitoring, etc. - - Storage driver implementations of this class should - be capable of handling high workloads and huge - numbers of queues. - """ - - def list(self, project=None, marker=None, - limit=DEFAULT_QUEUES_PER_PAGE, detailed=False): - """Base method for listing queues. 
- - :param project: Project id - :param marker: The last queue name - :param limit: (Default 10) Max number of queues to return - :param detailed: Whether metadata is included - - :returns: An iterator giving a sequence of queues - and the marker of the next page. - """ - return self._list(project, marker, limit, detailed) - - _list = abc.abstractmethod(lambda x: None) - - def get(self, name, project=None): - """Base method for queue metadata retrieval. - - :param name: The queue name - :param project: Project id - - :returns: Dictionary containing queue metadata - :raises DoesNotExist: if queue metadata does not exist - """ - return self._get(name, project) - - _get = abc.abstractmethod(lambda x: None) - - def get_metadata(self, name, project=None): - """Base method for queue metadata retrieval. - - :param name: The queue name - :param project: Project id - - :returns: Dictionary containing queue metadata - :raises DoesNotExist: if queue metadata does not exist - """ - raise NotImplementedError - - def set_metadata(self, name, metadata, project=None): - """Base method for updating queue metadata. - - :param name: The queue name - :param metadata: Queue metadata as a dict - :param project: Project id - :raises DoesNotExist: if queue metadata cannot be updated - """ - raise NotImplementedError - - def create(self, name, metadata=None, project=None): - """Base method for queue creation. - - :param name: The queue name - :param project: Project id - :returns: True if a queue was created and False - if it was updated. - """ - return self._create(name, metadata, project) - - _create = abc.abstractmethod(lambda x: None) - - def exists(self, name, project=None): - """Base method for testing queue existence. - - :param name: The queue name - :param project: Project id - :returns: True if a queue exists and False - if it does not. - """ - return self._exists(name, project) - - _exists = abc.abstractmethod(lambda x: None) - - def delete(self, name, project=None): - """Base method for deleting a queue. - - :param name: The queue name - :param project: Project id - """ - return self._delete(name, project) - - _delete = abc.abstractmethod(lambda x: None) - - def stats(self, name, project=None): - """Base method for queue stats. - - :param name: The queue name - :param project: Project id - :returns: Dictionary with the - queue stats - """ - return self._stats(name, project) - - _stats = abc.abstractmethod(lambda x: None) - - -@six.add_metaclass(abc.ABCMeta) -class Message(ControllerBase): - """This class is responsible for managing message CRUD.""" - - @abc.abstractmethod - def list(self, queue, project=None, marker=None, - limit=DEFAULT_MESSAGES_PER_PAGE, - echo=False, client_uuid=None, - include_claimed=False): - """Base method for listing messages. - - :param queue: Name of the queue to get the - message from. - :param project: Project id - :param marker: Tail identifier - :param limit: (Default 10) Max number of messages to return. - :type limit: Maybe int - :param echo: (Default False) Boolean expressing whether - or not this client should receive its own messages. - :param client_uuid: A UUID object. Required when echo=False. - :param include_claimed: include claimed messages in the listing? - :type include_claimed: bool - - :returns: An iterator giving a sequence of messages and - the marker of the next page. - """ - raise NotImplementedError - - @abc.abstractmethod - def first(self, queue, project=None, sort=1): - """Get first message in the queue (including claimed).
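The Queue controller above follows a template-method pattern: the public methods (list, get, create, exists, delete, stats) are thin wrappers, and concrete drivers override the underscored hooks declared via `abc.abstractmethod(lambda x: None)`. A minimal in-memory mimic of a few of those hooks, illustrative only and ignoring markers and detailed listings:

    # Standalone mimic of the pattern; zaqar's real base class also wires
    # up ControllerBase and pagination markers, which are omitted here.
    class InMemoryQueueController(object):
        def __init__(self):
            self._queues = {}

        # The underscored hooks are what the abstract Queue class expects.
        def _create(self, name, metadata, project):
            created = (project, name) not in self._queues
            self._queues[(project, name)] = metadata or {}
            return created

        def _exists(self, name, project):
            return (project, name) in self._queues

        def _delete(self, name, project):
            self._queues.pop((project, name), None)

    ctrl = InMemoryQueueController()
    print(ctrl._create('orders', None, 'project-1'))  # -> True
    print(ctrl._exists('orders', 'project-1'))        # -> True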
- - :param queue: Name of the queue to list - :param sort: (Default 1) Sort order for the listing. Pass 1 for - ascending (oldest message first), or -1 for descending (newest - message first). - - :returns: First message in the queue, or None if the queue is - empty - """ - raise NotImplementedError - - @abc.abstractmethod - def get(self, queue, message_id, project=None): - """Base method for getting a message. - - :param queue: Name of the queue to get the - message from. - :param project: Project id - :param message_id: Message ID - - :returns: Dictionary containing message data - :raises DoesNotExist: if message data can not be got - """ - raise NotImplementedError - - @abc.abstractmethod - def bulk_get(self, queue, message_ids, project=None): - """Base method for getting multiple messages. - - :param queue: Name of the queue to get the - message from. - :param project: Project id - :param message_ids: A sequence of message IDs. - - :returns: An iterable, yielding dicts containing - message details - """ - raise NotImplementedError - - @abc.abstractmethod - def post(self, queue, messages, client_uuid, project=None): - """Base method for posting one or more messages. - - Implementations of this method should guarantee - and preserve the order, in the returned list, of - incoming messages. - - :param queue: Name of the queue to post message to. - :param messages: Messages to post to queue, an iterable - yielding 1 or more elements. An empty iterable - results in undefined behavior. - :param client_uuid: A UUID object. - :param project: Project id - - :returns: List of message ids - """ - raise NotImplementedError - - @abc.abstractmethod - def delete(self, queue, message_id, project=None, claim=None): - """Base method for deleting a single message. - - :param queue: Name of the queue to post - message to. - :param message_id: Message to be deleted - :param project: Project id - :param claim: Claim this message - belongs to. When specified, claim must - be valid and message_id must belong to - it. - """ - raise NotImplementedError - - @abc.abstractmethod - def bulk_delete(self, queue, message_ids, project=None): - """Base method for deleting multiple messages. - - :param queue: Name of the queue to post - message to. - :param message_ids: A sequence of message IDs - to be deleted. - :param project: Project id - """ - raise NotImplementedError - - @abc.abstractmethod - def pop(self, queue, limit, project=None): - """Base method for popping messages. - - :param queue: Name of the queue to pop - message from. - :param limit: Number of messages to pop. - :param project: Project id - """ - raise NotImplementedError - - -@six.add_metaclass(abc.ABCMeta) -class Claim(ControllerBase): - - @abc.abstractmethod - def get(self, queue, claim_id, project=None): - """Base method for getting a claim. - - :param queue: Name of the queue this - claim belongs to. - :param claim_id: The claim id - :param project: Project id - - :returns: (Claim's metadata, claimed messages) - :raises DoesNotExist: if claimed messages can not be got - """ - raise NotImplementedError - - @abc.abstractmethod - def create(self, queue, metadata, project=None, - limit=DEFAULT_MESSAGES_PER_CLAIM): - """Base method for creating a claim. - - :param queue: Name of the queue this - claim belongs to. - :param metadata: Claim's parameters - to be stored. - :param project: Project id - :param limit: (Default 10) Max number - of messages to claim. 
- - :returns: (Claim ID, claimed messages) - """ - raise NotImplementedError - - @abc.abstractmethod - def update(self, queue, claim_id, metadata, project=None): - """Base method for updating a claim. - - :param queue: Name of the queue this - claim belongs to. - :param claim_id: Claim to be updated - :param metadata: Claim's parameters - to be updated. - :param project: Project id - """ - raise NotImplementedError - - @abc.abstractmethod - def delete(self, queue, claim_id, project=None): - """Base method for deleting a claim. - - :param queue: Name of the queue this - claim belongs to. - :param claim_id: Claim to be deleted - :param project: Project id - """ - raise NotImplementedError - - -@six.add_metaclass(abc.ABCMeta) -class Subscription(ControllerBase): - """This class is responsible for managing notification subscriptions. - - """ - - @abc.abstractmethod - def list(self, queue, project=None, marker=None, - limit=DEFAULT_SUBSCRIPTIONS_PER_PAGE): - """Base method for listing subscriptions. - - :param queue: Name of the queue to get the subscriptions from. - :type queue: six.text_type - :param project: Project this subscription belongs to. - :type project: six.text_type - :param marker: used to determine which subscription to start with - :type marker: six.text_type - :param limit: (Default 10) Max number of results to return - :type limit: int - :returns: An iterator giving a sequence of subscriptions - and the marker of the next page. - :rtype: [{}] - """ - raise NotImplementedError - - @abc.abstractmethod - def get(self, queue, subscription_id, project=None): - """Returns a single subscription entry. - - :param queue: Name of the queue subscription belongs to. - :type queue: six.text_type - :param subscription_id: ID of this subscription - :type subscription_id: six.text_type - :param project: Project this subscription belongs to. - :type project: six.text_type - :returns: Dictionary containing subscription data - :rtype: {} - :raises SubscriptionDoesNotExist: if not found - """ - raise NotImplementedError - - @abc.abstractmethod - def create(self, queue, subscriber, ttl, options, project=None): - """Create a new subscription. - - :param queue: The source queue for notifications - :type queue: six.text_type - :param subscriber: The subscriber URI - :type subscriber: six.text_type - :param ttl: time to live for this subscription - :type ttl: int - :param options: Options used to configure this subscription - :type options: dict - :param project: Project id - :type project: six.text_type - :returns: True if a subscription was created and False - if it failed. - :rtype: boolean - """ - raise NotImplementedError - - @abc.abstractmethod - def update(self, queue, subscription_id, project=None, **kwargs): - """Updates the source, subscriber, ttl, and/or options of this subscription - - :param queue: Name of the queue subscription belongs to. - :type queue: six.text_type - :param subscription_id: ID of the subscription - :type subscription_id: six.text_type - :param kwargs: one of: `source`, `subscriber`, `ttl`, `options` - :type kwargs: dict - :raises SubscriptionDoesNotExist: if not found - :raises SubscriptionAlreadyExists: if the update would - create a duplicate subscription - """ - - raise NotImplementedError - - @abc.abstractmethod - def exists(self, queue, subscription_id, project=None): - """Base method for testing subscription existence. - - :param queue: Name of the queue subscription belongs to.
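Taken together, the Claim and Message interfaces above imply the usual worker loop: create a claim to lock a batch, process each message, delete it under the claim, then delete the claim. A hedged sketch against those abstract signatures; `claim_ctrl` and `message_ctrl` stand for any concrete implementations, and `handle` is an application-specific placeholder:

    def handle(msg):
        pass  # placeholder for application-specific processing

    def drain_once(claim_ctrl, message_ctrl, queue, project=None):
        # Lock up to the default batch of messages for 60s, 30s grace.
        claim_id, messages = claim_ctrl.create(
            queue, {'ttl': 60, 'grace': 30}, project=project)
        if claim_id is None:
            return 0  # nothing claimable right now
        processed = 0
        for msg in messages:
            handle(msg)
            # Deleting with claim= enforces that the claim still holds
            # and that the message belongs to it.
            message_ctrl.delete(queue, msg['id'],
                                project=project, claim=claim_id)
            processed += 1
        claim_ctrl.delete(queue, claim_id, project=project)
        return processed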
- :type queue: six.text_type - :param subscription_id: ID of subscription - :type subscription_id: six.text_type - :param project: Project id - :type project: six.text_type - :returns: True if a subscription exists and False - if it does not. - """ - raise NotImplementedError - - @abc.abstractmethod - def delete(self, queue, subscription_id, project=None): - """Base method for deleting a subscription. - - :param queue: Name of the queue subscription belongs to. - :type queue: six.text_type - :param subscription_id: ID of the subscription to be deleted. - :type subscription_id: six.text_type - :param project: Project id - :type project: six.text_type - """ - raise NotImplementedError - - @abc.abstractmethod - def get_with_subscriber(self, queue, subscriber, project=None): - """Base method for get a subscription with the subscriber. - - :param queue: Name of the queue subscription belongs to. - :type queue: six.text_type - :param subscriber: link of the subscription to be notified. - :type subscriber: six.text_type - :param project: Project id - :type project: six.text_type - :returns: Dictionary containing subscription data - :rtype: dict - """ - raise NotImplementedError - - @abc.abstractmethod - def confirm(self, queue, subscription_id, project=None, confirmed=True): - """Base method for confirming a subscription. - - :param queue: Name of the queue subscription belongs to. - :type queue: six.text_type - :param subscription_id: ID of the subscription to be deleted. - :type subscription_id: six.text_type - :param project: Project id - :type project: six.text_type - :param confirmed: Confirm a subscription or cancel the confirmation of - a subscription. - :type confirmed: boolean - """ - raise NotImplementedError - - -@six.add_metaclass(abc.ABCMeta) -class PoolsBase(ControllerBase): - """A controller for managing pools.""" - - def _check_capabilities(self, uri, group=None, name=None): - default_store = self.driver.conf.drivers.message_store - pool_caps = self.capabilities(group=group, name=name) - - if not pool_caps: - return True - - new_store = utils.load_storage_impl(uri, - default_store=default_store) - - # NOTE(flaper87): Since all pools in a pool group - # are assumed to have the same capabilities, it's - # fine to check against just 1 - return pool_caps == new_store.BASE_CAPABILITIES - - def capabilities(self, group=None, name=None): - """Gets the set of capabilities for this group/name - - :param group: The pool group to get capabilities for - :type group: six.text_type - :param name: The pool name to get capabilities for - :type name: six.text_type - """ - if name: - group = list(self._get_pools_by_group(self._get(name)['group'])) - else: - group = list(self._get_pools_by_group(group)) - - if not len(group) > 0: - return () - - default_store = self.driver.conf.drivers.message_store - - pool_store = utils.load_storage_impl(group[0]['uri'], - default_store=default_store) - - return pool_store.BASE_CAPABILITIES - - def list(self, marker=None, limit=DEFAULT_POOLS_PER_PAGE, - detailed=False): - """Lists all registered pools. 
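_check_capabilities above admits a new pool only when its storage implementation advertises exactly the BASE_CAPABILITIES already in use by the group. A toy illustration of that equality test; the driver classes are stand-ins, and the Capabilities enum is copied from the base module shown earlier:

    import enum

    # Stand-in copy of zaqar.storage.base.Capabilities.
    class Capabilities(enum.IntEnum):
        FIFO = 1
        CLAIMS = 2
        DURABILITY = 3
        AOD = 4
        HIGH_THROUGHPUT = 5

    class FullFeatureDriver(object):
        BASE_CAPABILITIES = (Capabilities.FIFO, Capabilities.CLAIMS,
                             Capabilities.DURABILITY, Capabilities.AOD,
                             Capabilities.HIGH_THROUGHPUT)

    class LimitedDriver(object):
        BASE_CAPABILITIES = (Capabilities.CLAIMS,)

    group_caps = FullFeatureDriver.BASE_CAPABILITIES
    # The same equality test _check_capabilities applies: a driver with
    # mismatched capabilities cannot join the group.
    print(group_caps == FullFeatureDriver.BASE_CAPABILITIES)  # True
    print(group_caps == LimitedDriver.BASE_CAPABILITIES)      # False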
- - :param marker: used to determine which pool to start with - :type marker: six.text_type - :param limit: (Default 10) Max number of results to return - :type limit: int - :param detailed: whether to include options - :type detailed: bool - :returns: A list of pools - name, weight, uri - :rtype: [{}] - """ - - return self._list(marker, limit, detailed) - - _list = abc.abstractmethod(lambda x: None) - - def create(self, name, weight, uri, group=None, options=None): - """Registers a pool entry. - - :param name: The name of this pool - :type name: six.text_type - :param weight: the likelihood that this pool will be used - :type weight: int - :param uri: A URI that can be used by a storage client - (e.g., pymongo) to access this pool. - :type uri: six.text_type - :param group: The group of this pool - :type group: six.text_type - :param options: Options used to configure this pool - :type options: dict - """ - if not self._check_capabilities(uri, group=group): - raise errors.PoolCapabilitiesMismatch() - - return self._create(name, weight, uri, group, options) - - _create = abc.abstractmethod(lambda x: None) - - def get_pools_by_group(self, group=None, detailed=False): - """Returns a pool list filtered by given pool group. - - :param group: The group to filter on. `None` returns - pools that are not assigned to any pool group. - :type group: six.text_type - :param detailed: Should the options data be included? - :type detailed: bool - :returns: weight, uri, and options for this pool - :rtype: {} - :raises PoolDoesNotExist: if not found - """ - return self._get_pools_by_group(group, detailed) - - _get_pools_by_group = abc.abstractmethod(lambda x: None) - - def get(self, name, detailed=False): - """Returns a single pool entry. - - :param name: The name of this pool - :type name: six.text_type - :param detailed: Should the options data be included? - :type detailed: bool - :returns: weight, uri, and options for this pool - :rtype: {} - :raises PoolDoesNotExist: if not found - """ - return self._get(name, detailed) - - _get = abc.abstractmethod(lambda x: None) - - def exists(self, name): - """Returns a single pool entry. - - :param name: The name of this pool - :type name: six.text_type - :returns: True if the pool exists - :rtype: bool - """ - return self._exists(name) - - _exists = abc.abstractmethod(lambda x: None) - - def delete(self, name): - """Removes a pool entry. - - :param name: The name of this pool - :type name: six.text_type - :rtype: None - """ - return self._delete(name) - - _delete = abc.abstractmethod(lambda x: None) - - def update(self, name, **kwargs): - """Updates the weight, uris, and/or options of this pool - - :param name: Name of the pool - :type name: text - :param kwargs: one of: `uri`, `weight`, `options` - :type kwargs: dict - :raises PoolDoesNotExist: if not found - """ - uri = kwargs.get('uri') - if uri and not self._check_capabilities(uri, name=name): - raise errors.PoolCapabilitiesMismatch() - - return self._update(name, **kwargs) - - _update = abc.abstractmethod(lambda x: None) - - def drop_all(self): - """Deletes all pools from storage.""" - return self._drop_all() - - _drop_all = abc.abstractmethod(lambda x: None) - - -@six.add_metaclass(abc.ABCMeta) -class CatalogueBase(ControllerBase): - """A controller for managing the catalogue. - - The catalogue is responsible for maintaining a mapping - between project.queue entries to their pool. - """ - - @abc.abstractmethod - def list(self, project): - """Get a list of queues from the catalogue. 
- - :param project: The project to use when filtering through queue - entries. - :type project: six.text_type - :returns: [{'project': ..., 'queue': ..., 'pool': ...},] - :rtype: [dict] - """ - - raise NotImplementedError - - @abc.abstractmethod - def get(self, project, queue): - """Returns the pool identifier for the given queue. - - :param project: Namespace to search for the given queue - :type project: six.text_type - :param queue: The name of the queue to search for - :type queue: six.text_type - :returns: {'pool': ...} - :rtype: dict - :raises QueueNotMapped: if queue is not mapped - """ - - raise NotImplementedError - - @abc.abstractmethod - def exists(self, project, queue): - """Determines whether the given queue exists under project. - - :param project: Namespace to check. - :type project: six.text_type - :param queue: str - Particular queue to check for - :type queue: six.text_type - :return: True if the queue exists under this project - :rtype: bool - """ - - @abc.abstractmethod - def insert(self, project, queue, pool): - """Creates a new catalogue entry, or updates it if it already exists. - - :param project: str - Namespace to insert the given queue into - :type project: six.text_type - :param queue: str - The name of the queue to insert - :type queue: six.text_type - :param pool: pool identifier to associate this queue with - :type pool: six.text_type - """ - - raise NotImplementedError - - @abc.abstractmethod - def delete(self, project, queue): - """Removes this entry from the catalogue. - - :param project: The namespace to search for this queue - :type project: six.text_type - :param queue: The queue name to remove - :type queue: six.text_type - """ - - raise NotImplementedError - - @abc.abstractmethod - def update(self, project, queue, pools=None): - """Updates the pool identifier for this queue. - - :param project: Namespace to search - :type project: six.text_type - :param queue: The name of the queue - :type queue: six.text_type - :param pools: The name of the pool where this project/queue lives. - :type pools: six.text_type - :raises QueueNotMapped: if queue is not mapped - """ - - raise NotImplementedError - - @abc.abstractmethod - def drop_all(self): - """Drops all catalogue entries from storage.""" - - raise NotImplementedError - - -@six.add_metaclass(abc.ABCMeta) -class FlavorsBase(ControllerBase): - """A controller for managing flavors.""" - - @abc.abstractmethod - def list(self, project=None, marker=None, limit=10): - """Lists all registered flavors. - - :param project: Project this flavor belongs to. - :type project: six.text_type - :param marker: used to determine which flavor to start with - :type marker: six.text_type - :param limit: (Default 10) Max number of results to return - :type limit: int - :returns: A list of flavors - name, project, flavor - :rtype: [{}] - """ - - raise NotImplementedError - - @abc.abstractmethod - def create(self, name, pool, project=None, capabilities=None): - """Registers a flavor entry. - - :param name: The name of this flavor - :type name: six.text_type - :param project: Project this flavor belongs to. - :type project: six.text_type - :param pool: The name of the pool to use for this flavor. - :type pool: six.text_type - :param capabilities: Flavor capabilities - :type capabilities: dict - """ - - raise NotImplementedError - - @abc.abstractmethod - def get(self, name, project=None): - """Returns a single flavor entry. 
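The catalogue described above is essentially a mapping from (project, queue) to a pool identifier, with insert doubling as an upsert. A minimal dict-backed sketch of that contract, with the error type reduced to KeyError for brevity:

    class DictCatalogue(object):
        """Toy stand-in for CatalogueBase: (project, queue) -> pool."""

        def __init__(self):
            self._entries = {}

        def insert(self, project, queue, pool):
            # Like the real controller, insert behaves as an upsert.
            self._entries[(project, queue)] = pool

        def get(self, project, queue):
            if (project, queue) not in self._entries:
                # The real driver raises errors.QueueNotMapped here.
                raise KeyError((project, queue))
            return {'pool': self._entries[(project, queue)]}

        def exists(self, project, queue):
            return (project, queue) in self._entries

    cat = DictCatalogue()
    cat.insert('project-1', 'orders', 'pool-a')
    print(cat.get('project-1', 'orders'))  # -> {'pool': 'pool-a'}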
- - :param name: The name of this flavor - :type name: six.text_type - :param project: Project this flavor belongs to. - :type project: six.text_type - :rtype: {} - :raises FlavorDoesNotExist: if not found - """ - - raise NotImplementedError - - @abc.abstractmethod - def exists(self, name, project=None): - """Verifies whether the flavor exists. - - :param name: The name of this flavor - :type name: six.text_type - :param project: Project this flavor belongs to. - :type project: six.text_type - :returns: True if the flavor exists - :rtype: bool - """ - - raise NotImplementedError - - @abc.abstractmethod - def delete(self, name, project=None): - """Removes a flavor entry. - - :param name: The name of this flavor - :type name: six.text_type - :param project: Project this flavor belongs to. - :type project: six.text_type - :rtype: None - """ - - raise NotImplementedError - - @abc.abstractmethod - def update(self, name, project=None, **kwargs): - """Updates the flavor and/or capabilities of this flavor - - :param name: Name of the flavor - :type name: text - :param project: Project this flavor belongs to. - :type project: six.text_type - :param kwargs: one of: `uri`, `weight`, `options` - :type kwargs: dict - :raises FlavorDoesNotExist: if not found - """ - - raise NotImplementedError - - @abc.abstractmethod - def drop_all(self): - """Deletes all flavors from storage.""" - - raise NotImplementedError diff --git a/zaqar/storage/configuration.py b/zaqar/storage/configuration.py deleted file mode 100644 index 83dc562a..00000000 --- a/zaqar/storage/configuration.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo_config import cfg - - -class Configuration(object): - - def __init__(self, conf): - """Initialize configuration.""" - self.local_conf = conf - - def register_opts(self, volume_opts, group=None): - self.local_conf.register_opts(volume_opts, group=group) - - def set_override(self, name, override, group=None, enforce_type=False): - self.local_conf.set_override(name, override, group=group, - enforce_type=enforce_type) - - def safe_get(self, value): - try: - return self.__getattr__(value) - except cfg.NoSuchOptError: - return None - - def __contains__(self, key): - """Return True if key is in local_conf.""" - return key in self.local_conf - - def __getattr__(self, value): - # Don't use self.local_conf to avoid reentrant call to __getattr__() - local_conf = object.__getattribute__(self, 'local_conf') - return getattr(local_conf, value) - - def __getitem__(self, key): - """Look up an option value and perform string substitution.""" - return self.local_conf.__getitem__(key) diff --git a/zaqar/storage/errors.py b/zaqar/storage/errors.py deleted file mode 100644 index 0a534927..00000000 --- a/zaqar/storage/errors.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
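The safe_get wrapper in the Configuration class above turns a missing option into None instead of an oslo.config error. A small self-contained usage sketch, with the wrapper trimmed to just that method and invented option names:

    from oslo_config import cfg

    class Configuration(object):
        # Minimal copy of the wrapper above, trimmed to safe_get.
        def __init__(self, conf):
            self.local_conf = conf

        def safe_get(self, value):
            try:
                return getattr(self.local_conf, value)
            except cfg.NoSuchOptError:
                return None

    conf = cfg.ConfigOpts()
    conf.register_opts([cfg.StrOpt('uri', default='mongodb://localhost')])

    wrapped = Configuration(conf)
    print(wrapped.safe_get('uri'))          # -> mongodb://localhost
    print(wrapped.safe_get('no_such_opt'))  # -> None, no exception raised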
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class ExceptionBase(Exception): - - msg_format = '' - - def __init__(self, **kwargs): - msg = self.msg_format.format(**kwargs) - super(ExceptionBase, self).__init__(msg) - - -class ConnectionError(ExceptionBase): - """Raised when the connection with the back-end was lost.""" - - -class DoesNotExist(ExceptionBase): - """Resource does not exist.""" - - -class NotPermitted(ExceptionBase): - """Operation not permitted.""" - - -class Conflict(ExceptionBase): - """Resource could not be created due to a conflict.""" - - -class MessageConflict(Conflict): - - msg_format = (u'Message could not be enqueued due to a conflict ' - u'with one or more other messages that are already in ' - u'queue {queue} for project {project}') - - def __init__(self, queue, project): - """Initializes the error with contextual information. - - :param queue: name of the queue to which the message was posted - - :param project: name of the project to which the queue belongs - """ - - super(MessageConflict, self).__init__(queue=queue, project=project) - - -class ClaimConflict(Conflict): - - msg_format = (u'Messages could not be claimed due to a conflict ' - u'with another parallel claim that is already in ' - u'queue {queue} for project {project}') - - def __init__(self, queue, project): - """Initializes the error with contextual information. - - :param queue: name of the queue to which the message was posted - :param project: name of the project to which the queue belongs - """ - - super(ClaimConflict, self).__init__(queue=queue, project=project) - - -class QueueDoesNotExist(DoesNotExist): - - msg_format = u'Queue {name} does not exist for project {project}' - - def __init__(self, name, project): - super(QueueDoesNotExist, self).__init__(name=name, project=project) - - -class QueueIsEmpty(ExceptionBase): - - msg_format = u'Queue {name} in project {project} is empty' - - def __init__(self, name, project): - super(QueueIsEmpty, self).__init__(name=name, project=project) - - -class MessageDoesNotExist(DoesNotExist): - - msg_format = (u'Message {mid} does not exist in ' - u'queue {queue} for project {project}') - - def __init__(self, mid, queue, project): - super(MessageDoesNotExist, self).__init__(mid=mid, queue=queue, - project=project) - - -class ClaimDoesNotExist(DoesNotExist): - - msg_format = (u'Claim {cid} does not exist in ' - u'queue {queue} for project {project}') - - def __init__(self, cid, queue, project): - super(ClaimDoesNotExist, self).__init__(cid=cid, queue=queue, - project=project) - - -class MessageIsClaimed(NotPermitted): - - msg_format = u'Message {mid} is claimed' - - def __init__(self, mid): - super(MessageIsClaimed, self).__init__(mid=mid) - - -class MessageNotClaimed(NotPermitted): - - msg_format = u'Message {mid} is no longer claimed' - - def __init__(self, mid): - super(MessageNotClaimed, self).__init__(mid=mid) - - -class MessageNotClaimedBy(NotPermitted): - - msg_format = u'Message {mid} is not claimed by {cid}' - - def __init__(self, mid, cid): - super(MessageNotClaimedBy, self).__init__(cid=cid, mid=mid) - - -class QueueNotMapped(DoesNotExist): - - msg_format = (u'No pool found for 
' - u'queue {queue} for project {project}') - - def __init__(self, queue, project): - super(QueueNotMapped, self).__init__(queue=queue, project=project) - - -class PoolDoesNotExist(DoesNotExist): - - msg_format = u'Pool {pool} does not exist' - - def __init__(self, pool): - super(PoolDoesNotExist, self).__init__(pool=pool) - - -class PoolGroupDoesNotExist(DoesNotExist): - - msg_format = u'Pool group {pool_group} does not exist' - - def __init__(self, pool_group): - super(PoolGroupDoesNotExist, self).__init__(pool_group=pool_group) - - -class FlavorDoesNotExist(DoesNotExist): - - msg_format = u'Flavor {flavor} does not exist' - - def __init__(self, flavor): - super(FlavorDoesNotExist, self).__init__(flavor=flavor) - - -class NoPoolFound(ExceptionBase): - - msg_format = u'No pools registered' - - def __init__(self): - super(NoPoolFound, self).__init__() - - -class PoolInUseByFlavor(NotPermitted): - - msg_format = u'Pool {pid} is in use by flavor {fid}' - - def __init__(self, pid, fid): - super(PoolInUseByFlavor, self).__init__(pid=pid, fid=fid) - self._flavor = fid - - @property - def flavor(self): - return self._flavor - - -class SubscriptionDoesNotExist(DoesNotExist): - - msg_format = u'Subscription {subscription_id} does not exist' - - def __init__(self, subscription_id): - super(SubscriptionDoesNotExist, - self).__init__(subscription_id=subscription_id) - - -class PoolCapabilitiesMismatch(ExceptionBase): - - msg_format = (u'The pool being added does not ' - u'support the minimum set of capabilities') - - -class PoolAlreadyExists(Conflict): - - msg_format = u'The database URI is in use by another pool.' - - -class SubscriptionAlreadyExists(Conflict): - - msg_format = (u'Such a subscription already exists. Subscriptions ' - u'are unique by project + queue + subscriber URI.') diff --git a/zaqar/storage/mongodb/__init__.py b/zaqar/storage/mongodb/__init__.py deleted file mode 100644 index e63e8728..00000000 --- a/zaqar/storage/mongodb/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r""" -MongoDB Storage Driver for Zaqar. - -About the store ---------------- - -MongoDB is a NoSQL, eventually consistent, reliable database with support for -horizontal scaling, capable of handling different levels of throughput. - -Supported Features ------------------- - -- FIFO -- Unlimited horizontal-scaling [1]_ -- Reliability [2]_ - -.. [1] This is only possible with a sharding environment -.. [2] Write concern must be equal to or higher than 2 - -Supported Deployments ---------------------- - -MongoDB can be deployed in 3 different ways. The first and simplest one is -to deploy a standalone `mongod` node. The second one is to use a replica set, -which gives a master-slave deployment but cannot be scaled without limit. The -third and last one is a sharded cluster. - -The second and third methods are the ones recommended for production -environments where durability and scalability are must-haves.
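All of the storage errors defined above in errors.py share one mechanism: ExceptionBase formats msg_format with the keyword arguments passed to the constructor. A minimal sketch of defining and raising one such error, with a standalone copy of the base class for illustration:

    class ExceptionBase(Exception):
        msg_format = ''

        def __init__(self, **kwargs):
            msg = self.msg_format.format(**kwargs)
            super(ExceptionBase, self).__init__(msg)

    class QueueDoesNotExist(ExceptionBase):
        msg_format = u'Queue {name} does not exist for project {project}'

    try:
        raise QueueDoesNotExist(name='orders', project='project-1')
    except QueueDoesNotExist as exc:
        print(exc)  # -> Queue orders does not exist for project project-1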
The driver -itself forces operators to use such environments by checking whether it is -talking to a replica-set or sharded cluster. Such enforcement can be disabled -by running Zaqar in an unreliable mode. - -Replica Sets ------------- - -When running on a replica-set, Zaqar won't try to be smart and it'll rely as -much as possible on the database and pymongo. - -Sharded Cluster ---------------- - -TBD -""" - -from zaqar.storage.mongodb import driver - -# Hoist classes into package namespace -ControlDriver = driver.ControlDriver -DataDriver = driver.DataDriver -FIFODataDriver = driver.FIFODataDriver diff --git a/zaqar/storage/mongodb/catalogue.py b/zaqar/storage/mongodb/catalogue.py deleted file mode 100644 index f20ea7f7..00000000 --- a/zaqar/storage/mongodb/catalogue.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""MongoDB storage controller for the queues catalogue. - -Serves to construct an association between a project + queue -> pool. - -:: - - { - 'p_q': project_queue :: six.text_type, - 's': pool_identifier :: six.text_type - } -""" - -from zaqar.storage import base -from zaqar.storage import errors -from zaqar.storage.mongodb import utils - - -PRIMARY_KEY = utils.PROJ_QUEUE_KEY - -CATALOGUE_INDEX = [ - (PRIMARY_KEY, 1) -] - - -class CatalogueController(base.CatalogueBase): - - def __init__(self, *args, **kwargs): - super(CatalogueController, self).__init__(*args, **kwargs) - - self._col = self.driver.database.catalogue - self._col.ensure_index(CATALOGUE_INDEX, unique=True) - - @utils.raises_conn_error - def _insert(self, project, queue, pool, upsert): - key = utils.scope_queue_name(queue, project) - return self._col.update({PRIMARY_KEY: key}, - {'$set': {'s': pool}}, upsert=upsert) - - @utils.raises_conn_error - def list(self, project): - fields = {'_id': 0} - - query = utils.scoped_query(None, project) - return utils.HookedCursor(self._col.find(query, fields), - _normalize) - - @utils.raises_conn_error - def get(self, project, queue): - fields = {'_id': 0} - key = utils.scope_queue_name(queue, project) - entry = self._col.find_one({PRIMARY_KEY: key}, - projection=fields) - - if entry is None: - raise errors.QueueNotMapped(queue, project) - - return _normalize(entry) - - @utils.raises_conn_error - def exists(self, project, queue): - key = utils.scope_queue_name(queue, project) - return self._col.find_one({PRIMARY_KEY: key}) is not None - - def insert(self, project, queue, pool): - # NOTE(cpp-cabrera): _insert handles conn_error - self._insert(project, queue, pool, upsert=True) - - @utils.raises_conn_error - def delete(self, project, queue): - self._col.delete_one({ - PRIMARY_KEY: utils.scope_queue_name(queue, project)}) - - def update(self, project, queue, pool=None): - # NOTE(cpp-cabrera): _insert handles conn_error - res = self._insert(project, queue, pool, upsert=False) - - if not res['updatedExisting']: - raise errors.QueueNotMapped(queue, project) - - @utils.raises_conn_error - def 
drop_all(self): - self._col.drop() - self._col.ensure_index(CATALOGUE_INDEX, unique=True) - - -def _normalize(entry): - project, queue = utils.parse_scoped_project_queue(entry[PRIMARY_KEY]) - return { - 'queue': queue, - 'project': project, - 'pool': entry['s'] - } diff --git a/zaqar/storage/mongodb/claims.py b/zaqar/storage/mongodb/claims.py deleted file mode 100644 index ee267d29..00000000 --- a/zaqar/storage/mongodb/claims.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implements the MongoDB storage controller for claims. - -Field Mappings: - In order to reduce the disk / memory space used, - field names will be, most of the time, the first - letter of their long name. -""" - -import datetime - -from bson import objectid -from oslo_log import log as logging -from oslo_utils import timeutils -from pymongo.collection import ReturnDocument - -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage.mongodb import utils - -LOG = logging.getLogger(__name__) - - -def _messages_iter(msg_iter): - """Used to iterate through messages.""" - - msg = next(msg_iter) - yield msg.pop('claim') - yield msg - - # Strip the claim data from the remaining messages - for msg in msg_iter: - del msg['claim'] - yield msg - - -class ClaimController(storage.Claim): - """Implements claim resource operations using MongoDB. - - No dedicated collection is being used - for claims. - - Claims are created in the messages - collection and live within messages, that is, - in the c field. - - This implementation certainly uses more space - on disk but reduces the number of queries to - be executed and the time needed to retrieve - claims and claimed messages. - - As for the memory usage, this implementation - requires less memory since a single index is - required. The index is a compound index between - the claim id and its expiration timestamp. - """ - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def get(self, queue, claim_id, project=None): - msg_ctrl = self.driver.message_controller - - # Base query, always check expire time - now = timeutils.utcnow_ts() - cid = utils.to_oid(claim_id) - if cid is None: - raise errors.ClaimDoesNotExist(claim_id, queue, project) - - try: - # Let's get the claim's data - # from the first message - # in the iterator - msgs = _messages_iter(msg_ctrl._claimed(queue, cid, now, - project=project)) - claim = next(msgs) - - update_time = claim['e'] - claim['t'] - age = now - update_time - - claim_meta = { - 'age': int(age), - 'ttl': claim['t'], - 'id': str(claim['id']), - } - except StopIteration: - raise errors.ClaimDoesNotExist(cid, queue, project) - - return claim_meta, msgs - - # NOTE(kgriffs): If we get an autoreconnect or any other connection error, - # the worst that can happen is you get an orphaned claim, but it will - # expire eventually and free up those messages to be claimed again.
We - # might consider setting a "claim valid" flag similar to how posting - # messages works, in order to avoid this situation if it turns out to - # be a real problem for users. - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def create(self, queue, metadata, project=None, - limit=storage.DEFAULT_MESSAGES_PER_CLAIM): - """Creates a claim. - - This implementation was done in a best-effort fashion. - In order to create a claim we need to get a list - of messages that can be claimed. Once we have that - list we execute a query filtering by the ids returned - by the previous query. - - Since there's a lot of room for race conditions here, - we'll check if the number of updated records is equal to - the max number of messages to claim. If the number of updated - messages is lower than limit, we'll try to claim the remaining - messages. - - These two queries are required because there's no way, for the - time being, to execute an update on a limited number of records. - """ - msg_ctrl = self.driver.message_controller - queue_ctrl = self.driver.queue_controller - - ttl = metadata['ttl'] - grace = metadata['grace'] - oid = objectid.ObjectId() - - now = timeutils.utcnow_ts() - claim_expires = now + ttl - claim_expires_dt = datetime.datetime.utcfromtimestamp(claim_expires) - - message_ttl = ttl + grace - message_expiration = datetime.datetime.utcfromtimestamp( - claim_expires + grace) - - meta = { - 'id': oid, - 't': ttl, - 'e': claim_expires, - 'c': 0 # NOTE(flwang): A placeholder which will be updated later - } - - # Get a list of active, not claimed nor expired - # messages that could be claimed. - msgs = msg_ctrl._active(queue, projection={'_id': 1, 'c': 1}, - project=project, - limit=limit) - - messages = iter([]) - be_claimed = [(msg['_id'], msg['c'].get('c', 0)) for msg in msgs] - ids = [_id for _id, _ in be_claimed] - - if len(ids) == 0: - return None, messages - - # Get the maxClaimCount and deadLetterQueue from the current queue's meta - queue_meta = queue_ctrl.get(queue, project=project) - - now = timeutils.utcnow_ts() - - # NOTE(kgriffs): Set the claim field for - # the active message batch, while also - # filtering out any messages that happened - # to get claimed just now by one or more - # parallel requests. - # - # Filtering by just 'c.e' works because - # new messages have that field initialized - # to the current time when the message is - # posted. There is no need to check whether - # 'c' exists or 'c.id' is None. - collection = msg_ctrl._collection(queue, project) - updated = collection.update({'_id': {'$in': ids}, - 'c.e': {'$lte': now}}, - {'$set': {'c': meta}}, - upsert=False, - multi=True)['n'] - - # NOTE(flaper87): Dirty hack! - # This sets the expiration time to - # `expires` on messages that would - # expire before the claim. - new_values = {'e': message_expiration, 't': message_ttl} - collection.update({'p_q': utils.scope_queue_name(queue, project), - 'e': {'$lt': claim_expires_dt}, - 'c.id': oid}, - {'$set': new_values}, - upsert=False, multi=True) - - if ('_max_claim_count' in queue_meta and - '_dead_letter_queue' in queue_meta): - LOG.debug(u"The list of messages being claimed: %(be_claimed)s", - {"be_claimed": be_claimed}) - - for _id, claimed_count in be_claimed: - # NOTE(flwang): We have claimed the message above, but we will - # update the claim count below. So that means, when the - # claimed_count equals queue_meta['_max_claim_count'], the - # message has met the threshold. And Zaqar will move it to the - # DLQ.
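The create() docstring above describes a two-phase, best-effort algorithm: read a candidate batch of claimable message ids, issue a guarded multi-update restricted to those ids, then count how many documents actually took the claim. A schematic, pymongo-3.x-flavored sketch of that pattern in isolation; the collection layout is simplified, with field names following the single-letter mapping used above:

    # Schematic only: assumes a pymongo collection whose documents carry
    # a claim subdocument 'c' with 'c.e' (claim expiry), as described above.
    def claim_batch(collection, claim_doc, now, limit):
        # Phase 1: read candidates that look claimable right now.
        candidates = [d['_id'] for d in collection.find(
            {'c.e': {'$lte': now}}, projection={'_id': 1}).limit(limit)]
        if not candidates:
            return 0
        # Phase 2: guarded update; repeating the 'c.e' filter drops any
        # message a parallel request claimed between the two queries.
        result = collection.update_many(
            {'_id': {'$in': candidates}, 'c.e': {'$lte': now}},
            {'$set': {'c': claim_doc}})
        # The caller must still re-read by claim id, since the count may
        # be lower than len(candidates) under contention.
        return result.modified_count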
- if claimed_count < queue_meta['_max_claim_count']: - # 1. Save the new max claim count for the message - collection.update({'_id': _id, - 'c.id': oid}, - {'$set': {'c.c': claimed_count + 1}}, - upsert=False) - LOG.debug(u"Message %(id)s has been claimed %(count)d " - u"times.", {"id": str(_id), - "count": claimed_count + 1}) - else: - # 2. Check if the message's claim count has exceeded the - # max claim count defined in the queue, if so, move the - # message to the dead letter queue. - - # NOTE(flwang): We're moving the message directly. That - # means the queue and dead letter queue must be created - # on the same storage pool. It's a technical tradeoff, - # because if we re-sent the message to the dead letter - # queue via the message controller, we would lose all the - # claim information. - dlq_name = queue_meta['_dead_letter_queue'] - new_msg = {'c.c': claimed_count, - 'p_q': utils.scope_queue_name(dlq_name, - project)} - dlq_ttl = queue_meta.get("_dead_letter_queue_messages_ttl") - if dlq_ttl: - new_msg['t'] = dlq_ttl - kwargs = {"return_document": ReturnDocument.AFTER} - msg = collection.find_one_and_update({'_id': _id, - 'c.id': oid}, - {'$set': new_msg}, - **kwargs) - dlq_collection = msg_ctrl._collection(dlq_name, project) - if not dlq_collection: - LOG.warning(u"Failed to find the message collection " - u"for queue %(dlq_name)s", {"dlq_name": - dlq_name}) - return None, iter([]) - result = dlq_collection.insert_one(msg) - if result.inserted_id: - collection.delete_one({'_id': _id}) - LOG.debug(u"Message %(id)s has met the max claim count " - u"%(count)d, now it has been moved to dead " - u"letter queue %(dlq_name)s.", - {"id": str(_id), "count": claimed_count, - "dlq_name": dlq_name}) - # NOTE(flwang): Because the claimed count has met the - # max, the current claim is not valid. And technically, - # creating the claim has failed. - return None, iter([]) - - if updated != 0: - # NOTE(kgriffs): This extra step is necessary because - # in between having gotten a list of active messages - # and updating them, some of them may have been - # claimed by a parallel request. Therefore, we need - # to find out which messages were actually tagged - # with the claim ID successfully. - claim, messages = self.get(queue, oid, project=project) - - return str(oid), messages - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def update(self, queue, claim_id, metadata, project=None): - cid = utils.to_oid(claim_id) - if cid is None: - raise errors.ClaimDoesNotExist(claim_id, queue, project) - - now = timeutils.utcnow_ts() - grace = metadata['grace'] - ttl = metadata['ttl'] - claim_expires = now + ttl - claim_expires_dt = datetime.datetime.utcfromtimestamp(claim_expires) - message_ttl = ttl + grace - message_expires = datetime.datetime.utcfromtimestamp( - claim_expires + grace) - - msg_ctrl = self.driver.message_controller - claimed = msg_ctrl._claimed(queue, cid, expires=now, - limit=1, project=project) - - try: - next(claimed) - except StopIteration: - raise errors.ClaimDoesNotExist(claim_id, queue, project) - - meta = { - 'id': cid, - 't': ttl, - 'e': claim_expires, - } - - # TODO(kgriffs): Create methods for these so we don't interact - # with the messages collection directly (loose coupling) - scope = utils.scope_queue_name(queue, project) - collection = msg_ctrl._collection(queue, project) - collection.update({'p_q': scope, 'c.id': cid}, - {'$set': {'c': meta}}, - upsert=False, multi=True) - - # NOTE(flaper87): Dirty hack!
-        # This sets the expiration time to
-        # `expires` on messages that would
-        # expire before claim.
-        collection.update({'p_q': scope,
-                           'e': {'$lt': claim_expires_dt},
-                           'c.id': cid},
-                          {'$set': {'e': message_expires,
-                                    't': message_ttl}},
-                          upsert=False, multi=True)
-
-    @utils.raises_conn_error
-    @utils.retries_on_autoreconnect
-    def delete(self, queue, claim_id, project=None):
-        msg_ctrl = self.driver.message_controller
-        msg_ctrl._unclaim(queue, claim_id, project=project)
diff --git a/zaqar/storage/mongodb/controllers.py b/zaqar/storage/mongodb/controllers.py
deleted file mode 100644
index 7706f153..00000000
--- a/zaqar/storage/mongodb/controllers.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Exports MongoDB storage controllers.
-
-Field Mappings:
-    In order to reduce the disk / memory space used,
-    field names will be, most of the time, the first
-    letter of their long name. Field mappings will be
-    updated and documented in each controller class.
-"""
-
-from zaqar.storage.mongodb import catalogue
-from zaqar.storage.mongodb import claims
-from zaqar.storage.mongodb import flavors
-from zaqar.storage.mongodb import messages
-from zaqar.storage.mongodb import pools
-from zaqar.storage.mongodb import queues
-from zaqar.storage.mongodb import subscriptions
-
-
-CatalogueController = catalogue.CatalogueController
-ClaimController = claims.ClaimController
-FlavorsController = flavors.FlavorsController
-MessageController = messages.MessageController
-FIFOMessageController = messages.FIFOMessageController
-QueueController = queues.QueueController
-PoolsController = pools.PoolsController
-SubscriptionController = subscriptions.SubscriptionController
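
Stepping back to the claim logic above: both create() and update() finish with the same "dirty hack", pushing out the lifetime of any message that would expire before its claim does. A minimal sketch of that step in isolation, with illustrative argument names and update_many() standing in for the deprecated update(multi=True):

    import datetime


    def extend_expiration(collection, queue_scope, claim_id,
                          claim_expires, ttl, grace):
        # Anything expiring before the claim gets pushed out to
        # claim_expires + grace, mirroring the message_ttl = ttl + grace
        # bookkeeping in the claim controller.
        expires_dt = datetime.datetime.utcfromtimestamp(claim_expires)
        new_expires = datetime.datetime.utcfromtimestamp(
            claim_expires + grace)
        collection.update_many(
            {'p_q': queue_scope,
             'e': {'$lt': expires_dt},
             'c.id': claim_id},
            {'$set': {'e': new_expires, 't': ttl + grace}})
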
- -"""Mongodb storage driver implementation.""" - -import ssl - -from osprofiler import profiler -import pymongo -import pymongo.errors - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar import storage -from zaqar.storage.mongodb import controllers -from zaqar.storage.mongodb import options - - -def _connection(conf): - # NOTE(flaper87): remove possible zaqar specific - # schemes like: mongodb.fifo - uri = conf.uri - - if conf.uri: - uri = "mongodb://%s" % (conf.uri.split("://")[-1]) - - if conf.uri and 'replicaSet' in conf.uri: - MongoClient = pymongo.MongoReplicaSetClient - else: - MongoClient = pymongo.MongoClient - - if conf.uri and 'ssl=true' in conf.uri.lower(): - kwargs = {'connect': False} - - # Default to CERT_REQUIRED - ssl_cert_reqs = ssl.CERT_REQUIRED - - if conf.ssl_cert_reqs == 'CERT_OPTIONAL': - ssl_cert_reqs = ssl.CERT_OPTIONAL - - if conf.ssl_cert_reqs == 'CERT_NONE': - ssl_cert_reqs = ssl.CERT_NONE - - kwargs['ssl_cert_reqs'] = ssl_cert_reqs - - if conf.ssl_keyfile: - kwargs['ssl_keyfile'] = conf.ssl_keyfile - if conf.ssl_certfile: - kwargs['ssl_certfile'] = conf.ssl_certfile - if conf.ssl_ca_certs: - kwargs['ssl_ca_certs'] = conf.ssl_ca_certs - - return MongoClient(uri, **kwargs) - - return MongoClient(uri, connect=False) - - -class DataDriver(storage.DataDriverBase): - - BASE_CAPABILITIES = tuple(storage.Capabilities) - - _DRIVER_OPTIONS = options._config_options() - - _COL_SUFIX = "_messages_p" - - def __init__(self, conf, cache, control_driver): - super(DataDriver, self).__init__(conf, cache, control_driver) - - self.mongodb_conf = self.conf[options.MESSAGE_MONGODB_GROUP] - - conn = self.connection - server_info = conn.server_info()['version'] - self.server_version = tuple(map(int, server_info.split('.'))) - - if self.server_version < (2, 2): - raise RuntimeError(_('The mongodb driver requires mongodb>=2.2, ' - '%s found') % server_info) - - if not len(conn.nodes) > 1 and not conn.is_mongos: - if not self.conf.unreliable: - raise RuntimeError(_('Either a replica set or a mongos is ' - 'required to guarantee message delivery')) - else: - - _mongo_wc = conn.write_concern.document.get('w') - # NOTE(flwang): mongo client is using None as the default value of - # write concern. But in Python 3.x we can't compare by order - # different types of operands like in Python 2.x. - # And we can't set the write concern value when create the - # connection since it will fail with norepl if mongodb version - # below 2.6. Besides it doesn't make sense to create the - # connection again after getting the version. - durable = (_mongo_wc is not None and - (_mongo_wc == 'majority' or _mongo_wc >= 2) - ) - - if not self.conf.unreliable and not durable: - raise RuntimeError(_('Using a write concern other than ' - '`majority` or > 2 makes the service ' - 'unreliable. 
Please use a different ' - 'write concern or set `unreliable` ' - 'to True in the config file.')) - - # FIXME(flaper87): Make this dynamic - self._capabilities = self.BASE_CAPABILITIES - - @property - def capabilities(self): - return self._capabilities - - def is_alive(self): - try: - # NOTE(zyuan): Requires admin access to mongodb - return 'ok' in self.connection.admin.command('ping') - - except pymongo.errors.PyMongoError: - return False - - def close(self): - self.connection.close() - - def _health(self): - KPI = {} - KPI['storage_reachable'] = self.is_alive() - KPI['operation_status'] = self._get_operation_status() - message_volume = {'free': 0, 'claimed': 0, 'total': 0} - - for msg_col in [db.messages for db in self.message_databases]: - msg_count_claimed = msg_col.find({'c.id': {'$ne': None}}).count() - message_volume['claimed'] += msg_count_claimed - - msg_count_total = msg_col.find().count() - message_volume['total'] += msg_count_total - - message_volume['free'] = (message_volume['total'] - - message_volume['claimed']) - KPI['message_volume'] = message_volume - return KPI - - @decorators.lazy_property(write=False) - def message_databases(self): - """List of message databases, ordered by partition number.""" - - kwargs = {} - if not self.server_version < (2, 6): - # NOTE(flaper87): Skip mongodb versions below 2.6 when - # setting the write concern on the database. pymongo 3.0 - # fails with norepl when creating indexes. - doc = self.connection.write_concern.document.copy() - doc.setdefault('w', 'majority') - doc.setdefault('j', False) - kwargs['write_concern'] = pymongo.WriteConcern(**doc) - - name = self.mongodb_conf.database - partitions = self.mongodb_conf.partitions - - databases = [] - for p in range(partitions): - db_name = name + self._COL_SUFIX + str(p) - databases.append(self.connection.get_database(db_name, **kwargs)) - return databases - - @decorators.lazy_property(write=False) - def subscriptions_database(self): - """Database dedicated to the "subscription" collection.""" - name = self.mongodb_conf.database + '_subscriptions' - return self.connection[name] - - @decorators.lazy_property(write=False) - def connection(self): - """MongoDB client connection instance.""" - return _connection(self.mongodb_conf) - - @decorators.lazy_property(write=False) - def message_controller(self): - controller = controllers.MessageController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("mongodb_message_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def claim_controller(self): - controller = controllers.ClaimController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("mongodb_claim_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def subscription_controller(self): - controller = controllers.SubscriptionController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("mongodb_subscription_" - "controller")(controller) - else: - return controller - - -class FIFODataDriver(DataDriver): - - BASE_CAPABILITIES = (storage.Capabilities.DURABILITY, - storage.Capabilities.CLAIMS, - storage.Capabilities.AOD, - storage.Capabilities.HIGH_THROUGHPUT) - - _COL_SUFIX = "_messages_fifo_p" - - @decorators.lazy_property(write=False) - def message_controller(self): - controller = controllers.FIFOMessageController(self) - 
if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("mongodb_message_controller")(controller) - else: - return controller - - -class ControlDriver(storage.ControlDriverBase): - - def __init__(self, conf, cache): - super(ControlDriver, self).__init__(conf, cache) - - self.conf.register_opts(options.MANAGEMENT_MONGODB_OPTIONS, - group=options.MANAGEMENT_MONGODB_GROUP) - - self.mongodb_conf = self.conf[options.MANAGEMENT_MONGODB_GROUP] - - def close(self): - self.connection.close() - - @decorators.lazy_property(write=False) - def connection(self): - """MongoDB client connection instance.""" - return _connection(self.mongodb_conf) - - @decorators.lazy_property(write=False) - def database(self): - name = self.mongodb_conf.database - return self.connection[name] - - @decorators.lazy_property(write=False) - def queues_database(self): - """Database dedicated to the "queues" collection. - - The queues collection is separated out into its own database - to avoid writer lock contention with the messages collections. - """ - - name = self.mongodb_conf.database + '_queues' - return self.connection[name] - - @decorators.lazy_property(write=False) - def queue_controller(self): - controller = controllers.QueueController(self) - if (self.conf.profiler.enabled and - (self.conf.profiler.trace_message_store or - self.conf.profiler.trace_management_store)): - return profiler.trace_cls("mongodb_queues_controller")(controller) - else: - return controller - - @property - def pools_controller(self): - controller = controllers.PoolsController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_management_store): - return profiler.trace_cls("mongodb_pools_controller")(controller) - else: - return controller - - @property - def catalogue_controller(self): - controller = controllers.CatalogueController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_management_store): - return profiler.trace_cls("mongodb_catalogue_" - "controller")(controller) - else: - return controller - - @property - def flavors_controller(self): - controller = controllers.FlavorsController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_management_store): - return profiler.trace_cls("mongodb_flavors_controller")(controller) - else: - return controller diff --git a/zaqar/storage/mongodb/flavors.py b/zaqar/storage/mongodb/flavors.py deleted file mode 100644 index 649a639c..00000000 --- a/zaqar/storage/mongodb/flavors.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
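
Every controller property in the driver classes above repeats the same wrap-if-profiling dance. A condensed sketch of that pattern (the helper name and the `flag` parameter are hypothetical; the osprofiler call mirrors the one used in the diff):

    from osprofiler import profiler


    def maybe_trace(conf, trace_name, controller,
                    flag='trace_message_store'):
        # Wrap the controller with an osprofiler tracer only when both
        # the profiler and the relevant trace flag are enabled.
        if conf.profiler.enabled and getattr(conf.profiler, flag):
            return profiler.trace_cls(trace_name)(controller)
        return controller
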
- -""" -Schema: - 'n': name :: six.text_type - 'p': project :: six.text_type - 's': storage pool_group :: six.text_type - 'c': capabilities :: dict -""" - -import functools - -from zaqar.storage import base -from zaqar.storage import errors -from zaqar.storage.mongodb import utils - -FLAVORS_INDEX = [ - ('p', 1), - ('n', 1), -] - -FLAVORS_STORAGE_POOL_INDEX = [ - ('s', 1) -] - -# NOTE(cpp-cabrera): used for get/list operations. There's no need to -# show the marker or the _id - they're implementation details. -OMIT_FIELDS = (('_id', False),) - - -def _field_spec(detailed=False): - return dict(OMIT_FIELDS + (() if detailed else (('c', False),))) - - -class FlavorsController(base.FlavorsBase): - - def __init__(self, *args, **kwargs): - super(FlavorsController, self).__init__(*args, **kwargs) - - self._col = self.driver.database.flavors - self._col.ensure_index(FLAVORS_INDEX, - background=True, - name='flavors_name', - unique=True) - self._col.ensure_index(FLAVORS_STORAGE_POOL_INDEX, - background=True, - name='flavors_storage_pool_group_name') - - self._pools_ctrl = self.driver.pools_controller - - @utils.raises_conn_error - def _list_by_pool_group(self, pool_group, limit=10, detailed=False): - query = {'s': pool_group} - cursor = self._col.find(query, projection=_field_spec(detailed), - limit=limit).sort('n', 1) - - normalizer = functools.partial(_normalize, detailed=detailed) - return utils.HookedCursor(cursor, normalizer) - - @utils.raises_conn_error - def list(self, project=None, marker=None, limit=10, detailed=False): - query = {'p': project} - if marker is not None: - query['n'] = {'$gt': marker} - - cursor = self._col.find(query, projection=_field_spec(detailed), - limit=limit).sort('n', 1) - marker_name = {} - - def normalizer(flavor): - marker_name['next'] = flavor['n'] - return _normalize(flavor, detailed=detailed) - - yield utils.HookedCursor(cursor, normalizer) - yield marker_name and marker_name['next'] - - @utils.raises_conn_error - def get(self, name, project=None, detailed=False): - res = self._col.find_one({'n': name, 'p': project}, - _field_spec(detailed)) - - if not res: - raise errors.FlavorDoesNotExist(name) - - return _normalize(res, detailed) - - @utils.raises_conn_error - def create(self, name, pool_group, project=None, capabilities=None): - - # NOTE(flaper87): Check if there are pools in this group. - # Should there be a `group_exists` method? - # NOTE(wanghao): Since we didn't pass the group name just pool name, - # so we don't need to get the pool by group. 
-        if not list(self._pools_ctrl.get_pools_by_group(pool_group)):
-            raise errors.PoolGroupDoesNotExist(pool_group)
-
-        capabilities = {} if capabilities is None else capabilities
-        self._col.update({'n': name, 'p': project},
-                         {'$set': {'s': pool_group, 'c': capabilities}},
-                         upsert=True)
-
-    @utils.raises_conn_error
-    def exists(self, name, project=None):
-        return self._col.find_one({'n': name, 'p': project}) is not None
-
-    @utils.raises_conn_error
-    def update(self, name, project=None, pool_group=None, capabilities=None):
-        fields = {}
-
-        if capabilities is not None:
-            fields['c'] = capabilities
-
-        if pool_group is not None:
-            fields['s'] = pool_group
-
-        assert fields, '`pool_group` or `capabilities` not found in kwargs'
-        res = self._col.update({'n': name, 'p': project},
-                               {'$set': fields},
-                               upsert=False)
-
-        if not res['updatedExisting']:
-            raise errors.FlavorDoesNotExist(name)
-
-    @utils.raises_conn_error
-    def delete(self, name, project=None):
-        self._col.delete_one({'n': name, 'p': project})
-
-    @utils.raises_conn_error
-    def drop_all(self):
-        self._col.drop()
-        self._col.ensure_index(FLAVORS_INDEX, unique=True)
-
-
-def _normalize(flavor, detailed=False):
-    ret = {
-        'name': flavor['n'],
-        'pool_group': flavor['s'],
-    }
-
-    if detailed:
-        ret['capabilities'] = flavor['c']
-
-    return ret
diff --git a/zaqar/storage/mongodb/messages.py b/zaqar/storage/mongodb/messages.py
deleted file mode 100644
index a4a98c76..00000000
--- a/zaqar/storage/mongodb/messages.py
+++ /dev/null
@@ -1,1038 +0,0 @@
-# Copyright (c) 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Implements the MongoDB storage controller for messages.
-
-Field Mappings:
-    In order to reduce the disk / memory space used,
-    field names will be, most of the time, the first
-    letter of their long name.
-"""
-
-import datetime
-import time
-
-from bson import objectid
-from oslo_log import log as logging
-from oslo_utils import timeutils
-import pymongo.errors
-import pymongo.read_preferences
-
-from zaqar.i18n import _
-from zaqar import storage
-from zaqar.storage import errors
-from zaqar.storage.mongodb import utils
-
-
-LOG = logging.getLogger(__name__)
-
-# NOTE(kgriffs): This value, in seconds, should be somewhat less than the
-# minimum allowed TTL for messages (60 seconds). Make it 45 to allow for
-# some fudge room.
-MAX_RETRY_POST_DURATION = 45
-
-# NOTE(kgriffs): It is extremely unlikely that all workers would somehow hang
-# for more than 5 seconds, without a single one being able to succeed in
-# posting some messages and incrementing the counter, thus allowing the other
-# producers to succeed in turn.
-COUNTER_STALL_WINDOW = 5
-
-# For hinting
-ID_INDEX_FIELDS = [('_id', 1)]
-
-# For removing expired messages
-TTL_INDEX_FIELDS = [
-    ('e', 1),
-]
-
-# NOTE(cpp-cabrera): to unify use of project/queue across mongodb
-# storage impls.
-PROJ_QUEUE = utils.PROJ_QUEUE_KEY
-
-# NOTE(kgriffs): This index is for listing messages, usually
-# filtering out claimed ones.
-ACTIVE_INDEX_FIELDS = [ - (PROJ_QUEUE, 1), # Project will be unique, so put first - ('k', 1), # Used for sorting and paging, must come before range queries - ('c.e', 1), # Used for filtering out claimed messages - - # NOTE(kgriffs): We do not include 'u' and 'tx' here on - # purpose. It was found experimentally that adding 'u' did - # not improve performance, and so it was left out in order - # to reduce index size and make updating the index - # faster. When 'tx' was added, it was assumed that it would - # follow a similar performance pattern to 'u', since by - # the time you traverse the index down past the fields - # listed above, there is very little left to scan, esp. - # considering all queries are limited (limit=) to a fairly - # small number. - # - # TODO(kgriffs): The extrapolation wrt 'tx' needs to be - # proven empirically. -] - -# For counting -COUNTING_INDEX_FIELDS = [ - (PROJ_QUEUE, 1), # Project will be unique, so put first - ('c.e', 1), # Used for filtering out claimed messages -] - -# Index used for claims -CLAIMED_INDEX_FIELDS = [ - (PROJ_QUEUE, 1), - ('c.id', 1), - ('k', 1), - ('c.e', 1), -] - -# This index is meant to be used as a shard-key and to ensure -# uniqueness for markers. -# -# As for other compound indexes, order matters. The marker `k` -# gives enough cardinality to ensure chunks are evenly distributed, -# whereas the `p_q` field helps keeping chunks from the same project -# and queue together. -# -# In a sharded environment, uniqueness of this index is still guaranteed -# because it's used as a shard key. -MARKER_INDEX_FIELDS = [ - ('k', 1), - (PROJ_QUEUE, 1), -] - -TRANSACTION_INDEX_FIELDS = [ - ('tx', 1), -] - - -class MessageController(storage.Message): - """Implements message resource operations using MongoDB. - - Messages are scoped by project + queue. - - :: - - Messages: - Name Field - ------------------------- - scope -> p_q - ttl -> t - expires -> e - marker -> k - body -> b - claim -> c - client uuid -> u - transaction -> tx - """ - - def __init__(self, *args, **kwargs): - super(MessageController, self).__init__(*args, **kwargs) - - # Cache for convenience and performance - self._num_partitions = self.driver.mongodb_conf.partitions - self._queue_ctrl = self.driver.queue_controller - self._retry_range = range(self.driver.mongodb_conf.max_attempts) - - # Create a list of 'messages' collections, one for each database - # partition, ordered by partition number. - # - # NOTE(kgriffs): Order matters, since it is used to lookup the - # collection by partition number. For example, self._collections[2] - # would provide access to zaqar_p2.messages (partition numbers are - # zero-based). 
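
The index definitions above are applied via ensure_index() in _ensure_indexes() below; pymongo 3.x removed ensure_index() in favor of create_index(). A sketch of the equivalent calls for two of the indexes, assuming the same single-letter field aliases ('e' = expires, 'k' = marker, 'c.e' = claim expiration):

    import pymongo


    def ensure_message_indexes(collection):
        # TTL index: the server reaps documents once 'e' passes.
        collection.create_index([('e', pymongo.ASCENDING)],
                                name='ttl', expireAfterSeconds=0,
                                background=True)

        # Listing index: project/queue scope first, marker for paging,
        # then claim expiration for filtering out claimed messages.
        collection.create_index([('p_q', pymongo.ASCENDING),
                                 ('k', pymongo.ASCENDING),
                                 ('c.e', pymongo.ASCENDING)],
                                name='active', background=True)
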
- self._collections = [db.messages - for db in self.driver.message_databases] - - # Ensure indexes are initialized before any queries are performed - for collection in self._collections: - self._ensure_indexes(collection) - - # ---------------------------------------------------------------------- - # Helpers - # ---------------------------------------------------------------------- - - def _ensure_indexes(self, collection): - """Ensures that all indexes are created.""" - - collection.ensure_index(TTL_INDEX_FIELDS, - name='ttl', - expireAfterSeconds=0, - background=True) - - collection.ensure_index(ACTIVE_INDEX_FIELDS, - name='active', - background=True) - - collection.ensure_index(CLAIMED_INDEX_FIELDS, - name='claimed', - background=True) - - collection.ensure_index(COUNTING_INDEX_FIELDS, - name='counting', - background=True) - - collection.ensure_index(MARKER_INDEX_FIELDS, - name='queue_marker', - background=True) - - collection.ensure_index(TRANSACTION_INDEX_FIELDS, - name='transaction', - background=True) - - def _collection(self, queue_name, project=None): - """Get a partitioned collection instance.""" - return self._collections[utils.get_partition(self._num_partitions, - queue_name, project)] - - def _backoff_sleep(self, attempt): - """Sleep between retries using a jitter algorithm. - - Mitigates thrashing between multiple parallel requests, and - creates backpressure on clients to slow down the rate - at which they submit requests. - - :param attempt: current attempt number, zero-based - """ - conf = self.driver.mongodb_conf - seconds = utils.calculate_backoff(attempt, conf.max_attempts, - conf.max_retry_sleep, - conf.max_retry_jitter) - - time.sleep(seconds) - - def _purge_queue(self, queue_name, project=None): - """Removes all messages from the queue. - - Warning: Only use this when deleting the queue; otherwise - you can cause a side-effect of reseting the marker counter - which can cause clients to miss tons of messages. - - If the queue does not exist, this method fails silently. - - :param queue_name: name of the queue to purge - :param project: ID of the project to which the queue belongs - """ - scope = utils.scope_queue_name(queue_name, project) - collection = self._collection(queue_name, project) - collection.delete_many({PROJ_QUEUE: scope}) - - def _list(self, queue_name, project=None, marker=None, - echo=False, client_uuid=None, projection=None, - include_claimed=False, sort=1, limit=None): - """Message document listing helper. - - :param queue_name: Name of the queue to list - :param project: (Default None) Project `queue_name` belongs to. If - not specified, queries the "global" namespace/project. - :param marker: (Default None) Message marker from which to start - iterating. If not specified, starts with the first message - available in the queue. - :param echo: (Default False) Whether to return messages that match - client_uuid - :param client_uuid: (Default None) UUID for the client that - originated this request - :param projection: (Default None) a list of field names that should be - returned in the result set or a dict specifying the fields to - include or exclude - :param include_claimed: (Default False) Whether to include - claimed messages, not just active ones - :param sort: (Default 1) Sort order for the listing. Pass 1 for - ascending (oldest message first), or -1 for descending (newest - message first). - :param limit: (Default None) The maximum number of messages - to list. 
The results may include fewer messages than the
-            requested `limit` if not enough are available. If limit is
-            not specified, all matching messages are iterated.
-
-        :returns: Generator yielding up to `limit` messages.
-        """
-
-        if sort not in (1, -1):
-            raise ValueError(u'sort must be either 1 (ascending) '
-                             u'or -1 (descending)')
-
-        now = timeutils.utcnow_ts()
-
-        query = {
-            # Messages must belong to this queue and project.
-            PROJ_QUEUE: utils.scope_queue_name(queue_name, project),
-
-            # NOTE(kgriffs): Messages must be finalized (i.e., must not
-            # be part of an unfinalized transaction).
-            #
-            # See also the note wrt 'tx' within the definition
-            # of ACTIVE_INDEX_FIELDS.
-            'tx': None,
-        }
-
-        if not echo:
-            query['u'] = {'$ne': client_uuid}
-
-        if marker is not None:
-            query['k'] = {'$gt': marker}
-
-        collection = self._collection(queue_name, project)
-
-        if not include_claimed:
-            # Only include messages that are not part of
-            # any claim, or are part of an expired claim.
-            query['c.e'] = {'$lte': now}
-
-        # Construct the request
-        cursor = collection.find(query,
-                                 projection=projection,
-                                 sort=[('k', sort)])
-
-        if limit is not None:
-            cursor.limit(limit)
-
-        # NOTE(flaper87): Suggest the index to use for this query to
-        # ensure the most performant one is chosen.
-        return cursor.hint(ACTIVE_INDEX_FIELDS)
-
-    # ----------------------------------------------------------------------
-    # "Friends" interface
-    # ----------------------------------------------------------------------
-
-    def _count(self, queue_name, project=None, include_claimed=False):
-        """Return total number of messages in a queue.
-
-        This method is designed to very quickly count the number
-        of messages in a given queue. Expired messages are not
-        counted, of course. If the queue does not exist, the
-        count will always be 0.
-
-        Note: Some expired messages may be included in the count if
-        they haven't been GC'd yet. This is done for performance.
-        """
-        query = {
-            # Messages must belong to this queue and project.
-            PROJ_QUEUE: utils.scope_queue_name(queue_name, project),
-
-            # NOTE(kgriffs): Messages must be finalized (i.e., must not
-            # be part of an unfinalized transaction).
-            #
-            # See also the note wrt 'tx' within the definition
-            # of ACTIVE_INDEX_FIELDS.
-            'tx': None,
-        }
-
-        if not include_claimed:
-            # Exclude messages that are claimed
-            query['c.e'] = {'$lte': timeutils.utcnow_ts()}
-
-        collection = self._collection(queue_name, project)
-        return collection.count(filter=query, hint=COUNTING_INDEX_FIELDS)
-
-    def _active(self, queue_name, marker=None, echo=False,
-                client_uuid=None, projection=None, project=None,
-                limit=None):
-
-        return self._list(queue_name, project=project, marker=marker,
-                          echo=echo, client_uuid=client_uuid,
-                          projection=projection, include_claimed=False,
-                          limit=limit)
-
-    def _claimed(self, queue_name, claim_id,
-                 expires=None, limit=None, project=None):
-
-        if claim_id is None:
-            claim_id = {'$ne': None}
-
-        query = {
-            PROJ_QUEUE: utils.scope_queue_name(queue_name, project),
-            'c.id': claim_id,
-            'c.e': {'$gt': expires or timeutils.utcnow_ts()},
-        }
-
-        kwargs = {}
-        collection = self._collection(queue_name, project)
-
-        # NOTE(kgriffs): Claimed messages must be queried from
-        # the primary to avoid a race condition caused by the
-        # multi-phased "create claim" algorithm.
-        # NOTE(flaper87): In pymongo 3.0 PRIMARY is the default and
-        # `read_preference` is read only. We'd need to set it when the
-        # client is created.
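
One way to honor the NOTE above under pymongo 3.x, where read_preference is immutable on an existing collection, is to take a primary-pinned clone via with_options(). A sketch with illustrative argument names, not the driver's actual workaround:

    import pymongo


    def claimed_from_primary(collection, queue_scope, claim_oid, now_ts):
        primary = collection.with_options(
            read_preference=pymongo.ReadPreference.PRIMARY)
        # Same filter as _claimed(): scoped to the queue, matching the
        # claim id, and only claims that have not yet expired.
        return primary.find({'p_q': queue_scope,
                             'c.id': claim_oid,
                             'c.e': {'$gt': now_ts}},
                            sort=[('k', 1)])
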
- msgs = collection.find(query, sort=[('k', 1)], **kwargs).hint( - CLAIMED_INDEX_FIELDS) - - if limit is not None: - msgs = msgs.limit(limit) - - now = timeutils.utcnow_ts() - - def denormalizer(msg): - doc = _basic_message(msg, now) - doc['claim'] = msg['c'] - - return doc - - return utils.HookedCursor(msgs, denormalizer) - - def _unclaim(self, queue_name, claim_id, project=None): - cid = utils.to_oid(claim_id) - - # NOTE(cpp-cabrera): early abort - avoid a DB query if we're handling - # an invalid ID - if cid is None: - return - - # NOTE(cpp-cabrera): unclaim by setting the claim ID to None - # and the claim expiration time to now - now = timeutils.utcnow_ts() - scope = utils.scope_queue_name(queue_name, project) - collection = self._collection(queue_name, project) - - collection.update({PROJ_QUEUE: scope, 'c.id': cid}, - {'$set': {'c': {'id': None, 'e': now}}}, - upsert=False, multi=True) - - def _inc_counter(self, queue_name, project=None, amount=1, window=None): - """Increments the message counter and returns the new value. - - :param queue_name: Name of the queue to which the counter is scoped - :param project: Queue's project name - :param amount: (Default 1) Amount by which to increment the counter - :param window: (Default None) A time window, in seconds, that - must have elapsed since the counter was last updated, in - order to increment the counter. - - :returns: Updated message counter value, or None if window - was specified, and the counter has already been updated - within the specified time period. - - :raises QueueDoesNotExist: if not found - """ - - # NOTE(flaper87): If this `if` is True, it means we're - # using a mongodb in the control plane. To avoid breaking - # environments doing so already, we'll keep using the counter - # in the mongodb queue_controller rather than the one in the - # message_controller. This should go away, eventually - if hasattr(self._queue_ctrl, '_inc_counter'): - return self._queue_ctrl._inc_counter(queue_name, project, - amount, window) - - now = timeutils.utcnow_ts() - - update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}} - query = _get_scoped_query(queue_name, project) - if window is not None: - threshold = now - window - query['c.t'] = {'$lt': threshold} - - while True: - try: - collection = self._collection(queue_name, project).stats - doc = collection.find_one_and_update( - query, update, - return_document=pymongo.ReturnDocument.AFTER, - projection={'c.v': 1, '_id': 0}) - - break - except pymongo.errors.AutoReconnect as ex: - LOG.exception(ex) - - if doc is None: - if window is None: - # NOTE(kgriffs): Since we did not filter by a time window, - # the queue should have been found and updated. Perhaps - # the queue has been deleted? - message = (u'Failed to increment the message ' - u'counter for queue %(name)s and ' - u'project %(project)s') - message %= dict(name=queue_name, project=project) - - LOG.warning(message) - - raise errors.QueueDoesNotExist(queue_name, project) - - # NOTE(kgriffs): Assume the queue existed, but the counter - # was recently updated, causing the range query on 'c.t' to - # exclude the record. - return None - - return doc['c']['v'] - - def _get_counter(self, queue_name, project=None): - """Retrieves the current message counter value for a given queue. - - This helper is used to generate monotonic pagination - markers that are saved as part of the message - document. - - Note 1: Markers are scoped per-queue and so are *not* - globally unique or globally ordered. 
- - Note 2: If two or more requests to this method are made - in parallel, this method will return the same counter - value. This is done intentionally so that the caller - can detect a parallel message post, allowing it to - mitigate race conditions between producer and - observer clients. - - :param queue_name: Name of the queue to which the counter is scoped - :param project: Queue's project - :returns: current message counter as an integer - """ - - # NOTE(flaper87): If this `if` is True, it means we're - # using a mongodb in the control plane. To avoid breaking - # environments doing so already, we'll keep using the counter - # in the mongodb queue_controller rather than the one in the - # message_controller. This should go away, eventually - if hasattr(self._queue_ctrl, '_get_counter'): - return self._queue_ctrl._get_counter(queue_name, project) - - update = {'$inc': {'c.v': 0, 'c.t': 0}} - query = _get_scoped_query(queue_name, project) - - try: - collection = self._collection(queue_name, project).stats - doc = collection.find_one_and_update( - query, update, upsert=True, - return_document=pymongo.ReturnDocument.AFTER, - projection={'c.v': 1, '_id': 0}) - - return doc['c']['v'] - except pymongo.errors.AutoReconnect as ex: - LOG.exception(ex) - - # ---------------------------------------------------------------------- - # Public interface - # ---------------------------------------------------------------------- - - def list(self, queue_name, project=None, marker=None, - limit=storage.DEFAULT_MESSAGES_PER_PAGE, - echo=False, client_uuid=None, include_claimed=False): - - if marker is not None: - try: - marker = int(marker) - except ValueError: - yield iter([]) - - messages = self._list(queue_name, project=project, marker=marker, - client_uuid=client_uuid, echo=echo, - include_claimed=include_claimed, limit=limit) - - marker_id = {} - - now = timeutils.utcnow_ts() - - # NOTE (kgriffs) @utils.raises_conn_error not needed on this - # function, since utils.HookedCursor already has it. 
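
The increment-by-zero upsert in _get_counter() above doubles as lazy initialization: it returns the current counter value and creates the document on first use, with no separate existence check. In isolation (a sketch; `stats_collection` and `scoped_query` are illustrative):

    import pymongo


    def read_counter(stats_collection, scoped_query):
        doc = stats_collection.find_one_and_update(
            scoped_query,
            {'$inc': {'c.v': 0, 'c.t': 0}},   # no-op increment
            upsert=True,
            return_document=pymongo.ReturnDocument.AFTER,
            projection={'c.v': 1, '_id': 0})
        return doc['c']['v']
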
- def denormalizer(msg): - marker_id['next'] = msg['k'] - - return _basic_message(msg, now) - - yield utils.HookedCursor(messages, denormalizer) - yield str(marker_id['next']) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def first(self, queue_name, project=None, sort=1): - cursor = self._list(queue_name, project=project, - include_claimed=True, sort=sort, - limit=1) - try: - message = next(cursor) - except StopIteration: - raise errors.QueueIsEmpty(queue_name, project) - - now = timeutils.utcnow_ts() - return _basic_message(message, now) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def get(self, queue_name, message_id, project=None): - mid = utils.to_oid(message_id) - if mid is None: - raise errors.MessageDoesNotExist(message_id, queue_name, - project) - - now = timeutils.utcnow_ts() - - query = { - '_id': mid, - PROJ_QUEUE: utils.scope_queue_name(queue_name, project), - } - - collection = self._collection(queue_name, project) - message = list(collection.find(query).limit(1).hint(ID_INDEX_FIELDS)) - - if not message: - raise errors.MessageDoesNotExist(message_id, queue_name, - project) - - return _basic_message(message[0], now) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def bulk_get(self, queue_name, message_ids, project=None): - message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] - if not message_ids: - return iter([]) - - now = timeutils.utcnow_ts() - - # Base query, always check expire time - query = { - '_id': {'$in': message_ids}, - PROJ_QUEUE: utils.scope_queue_name(queue_name, project), - } - - collection = self._collection(queue_name, project) - - # NOTE(flaper87): Should this query - # be sorted? - messages = collection.find(query).hint(ID_INDEX_FIELDS) - - def denormalizer(msg): - return _basic_message(msg, now) - - return utils.HookedCursor(messages, denormalizer) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def post(self, queue_name, messages, client_uuid, project=None): - # NOTE(flaper87): This method should be safe to retry on - # autoreconnect, since we've a 2-step insert for messages. - # The worst-case scenario is that we'll increase the counter - # several times and we'd end up with some non-active messages. - - if not self._queue_ctrl.exists(queue_name, project): - raise errors.QueueDoesNotExist(queue_name, project) - - # NOTE(flaper87): Make sure the counter exists. This method - # is an upsert. 
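
The non-FIFO post(), continuing below, reserves a contiguous block of markers by bumping the counter by the batch size and deriving the first marker by subtraction. That arithmetic spelled out (hypothetical helper; `inc_counter` stands in for _inc_counter):

    def allocate_markers(inc_counter, queue, project, batch_size):
        # Bump the counter by the whole batch, then derive the first
        # marker of the reserved range by subtraction.
        next_counter = inc_counter(queue, project, amount=batch_size)
        first = next_counter - batch_size
        return [first + i for i in range(batch_size)]
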
- self._get_counter(queue_name, project) - now = timeutils.utcnow_ts() - now_dt = datetime.datetime.utcfromtimestamp(now) - collection = self._collection(queue_name, project) - - messages = list(messages) - msgs_n = len(messages) - next_marker = self._inc_counter(queue_name, - project, - amount=msgs_n) - msgs_n - - prepared_messages = [ - { - PROJ_QUEUE: utils.scope_queue_name(queue_name, project), - 't': message['ttl'], - 'e': now_dt + datetime.timedelta(seconds=message['ttl']), - 'u': client_uuid, - 'c': {'id': None, 'e': now, 'c': 0}, - 'b': message['body'] if 'body' in message else {}, - 'k': next_marker + index, - 'tx': None, - } - - for index, message in enumerate(messages) - ] - - ids = collection.insert(prepared_messages, check_keys=False) - - return [str(id_) for id_ in ids] - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def delete(self, queue_name, message_id, project=None, claim=None): - # NOTE(cpp-cabrera): return early - this is an invalid message - # id so we won't be able to find it any way - mid = utils.to_oid(message_id) - if mid is None: - return - - collection = self._collection(queue_name, project) - - query = { - '_id': mid, - PROJ_QUEUE: utils.scope_queue_name(queue_name, project), - } - - cid = utils.to_oid(claim) - if cid is None: - raise errors.ClaimDoesNotExist(claim, queue_name, project) - - now = timeutils.utcnow_ts() - cursor = collection.find(query).hint(ID_INDEX_FIELDS) - - try: - message = next(cursor) - except StopIteration: - return - - if claim is None: - if _is_claimed(message, now): - raise errors.MessageIsClaimed(message_id) - - else: - if message['c']['id'] != cid: - kwargs = {} - # NOTE(flaper87): In pymongo 3.0 PRIMARY is the default and - # `read_preference` is read only. We'd need to set it when the - # client is created. - # NOTE(kgriffs): Read from primary in case the message - # was just barely claimed, and claim hasn't made it to - # the secondary. - message = collection.find_one(query, **kwargs) - - if message['c']['id'] != cid: - if _is_claimed(message, now): - raise errors.MessageNotClaimedBy(message_id, claim) - - raise errors.MessageNotClaimed(message_id) - - collection.delete_one(query) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def bulk_delete(self, queue_name, message_ids, project=None): - message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] - query = { - '_id': {'$in': message_ids}, - PROJ_QUEUE: utils.scope_queue_name(queue_name, project), - } - - collection = self._collection(queue_name, project) - collection.delete_many(query) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def pop(self, queue_name, limit, project=None): - query = { - PROJ_QUEUE: utils.scope_queue_name(queue_name, project), - } - - # Only include messages that are not part of - # any claim, or are part of an expired claim. 
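
pop(), continuing below, drains messages with repeated find_and_modify(remove=True) calls, which pymongo 3.x replaces with find_one_and_delete(). A sketch of the same loop against the modern API, with illustrative names:

    def pop_messages(collection, queue_scope, limit, now_ts):
        # Only unclaimed (or expired-claim) messages are eligible.
        query = {'p_q': queue_scope, 'c.e': {'$lte': now_ts}}
        projection = {'_id': 1, 't': 1, 'b': 1, 'c.id': 1}
        popped = []
        for _ in range(limit):
            doc = collection.find_one_and_delete(query,
                                                 projection=projection)
            if doc is None:
                break  # queue ran dry early
            popped.append(doc)
        return popped
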
- now = timeutils.utcnow_ts() - query['c.e'] = {'$lte': now} - - collection = self._collection(queue_name, project) - projection = {'_id': 1, 't': 1, 'b': 1, 'c.id': 1} - - messages = (collection.find_and_modify(query, - projection=projection, - remove=True) - for _ in range(limit)) - - final_messages = [_basic_message(message, now) - for message in messages - if message] - - return final_messages - - -class FIFOMessageController(MessageController): - - def _ensure_indexes(self, collection): - """Ensures that all indexes are created.""" - - collection.ensure_index(TTL_INDEX_FIELDS, - name='ttl', - expireAfterSeconds=0, - background=True) - - collection.ensure_index(ACTIVE_INDEX_FIELDS, - name='active', - background=True) - - collection.ensure_index(CLAIMED_INDEX_FIELDS, - name='claimed', - background=True) - - collection.ensure_index(COUNTING_INDEX_FIELDS, - name='counting', - background=True) - - # NOTE(kgriffs): This index must be unique so that - # inserting a message with the same marker to the - # same queue will fail; this is used to detect a - # race condition which can cause an observer client - # to miss a message when there is more than one - # producer posting messages to the same queue, in - # parallel. - collection.ensure_index(MARKER_INDEX_FIELDS, - name='queue_marker', - unique=True, - background=True) - - collection.ensure_index(TRANSACTION_INDEX_FIELDS, - name='transaction', - background=True) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def post(self, queue_name, messages, client_uuid, project=None): - # NOTE(flaper87): This method should be safe to retry on - # autoreconnect, since we've a 2-step insert for messages. - # The worst-case scenario is that we'll increase the counter - # several times and we'd end up with some non-active messages. - - if not self._queue_ctrl.exists(queue_name, project): - raise errors.QueueDoesNotExist(queue_name, project) - - # NOTE(flaper87): Make sure the counter exists. This method - # is an upsert. - self._get_counter(queue_name, project) - now = timeutils.utcnow_ts() - now_dt = datetime.datetime.utcfromtimestamp(now) - collection = self._collection(queue_name, project) - - # Set the next basis marker for the first attempt. - # - # Note that we don't increment the counter right away because - # if 2 concurrent posts happen and the one with the higher counter - # ends before the one with the lower counter, there's a window - # where a client paging through the queue may get the messages - # with the higher counter and skip the previous ones. This would - # make our FIFO guarantee unsound. - next_marker = self._get_counter(queue_name, project) - - # Unique transaction ID to facilitate atomic batch inserts - transaction = objectid.ObjectId() - - prepared_messages = [ - { - PROJ_QUEUE: utils.scope_queue_name(queue_name, project), - 't': message['ttl'], - 'e': now_dt + datetime.timedelta(seconds=message['ttl']), - 'u': client_uuid, - 'c': {'id': None, 'e': now, 'c': 0}, - 'b': message['body'] if 'body' in message else {}, - 'k': next_marker + index, - 'tx': transaction, - } - - for index, message in enumerate(messages) - ] - - # NOTE(kgriffs): Don't take the time to do a 2-phase insert - # if there is no way for it to partially succeed. - if len(prepared_messages) == 1: - transaction = None - prepared_messages[0]['tx'] = None - - # Use a retry range for sanity, although we expect - # to rarely, if ever, reach the maximum number of - # retries. 
- # - # NOTE(kgriffs): With the default configuration (100 ms - # max sleep, 1000 max attempts), the max stall time - # before the operation is abandoned is 49.95 seconds. - for attempt in self._retry_range: - try: - ids = collection.insert(prepared_messages, check_keys=False) - - # Log a message if we retried, for debugging perf issues - if attempt != 0: - msgtmpl = _(u'%(attempts)d attempt(s) required to post ' - u'%(num_messages)d messages to queue ' - u'"%(queue)s" under project %(project)s') - - LOG.debug(msgtmpl, - dict(queue=queue_name, - attempts=attempt + 1, - num_messages=len(ids), - project=project)) - - # Update the counter in preparation for the next batch - # - # NOTE(kgriffs): Due to the unique index on the messages - # collection, competing inserts will fail as a whole, - # and keep retrying until the counter is incremented - # such that the competing marker's will start at a - # unique number, 1 past the max of the messages just - # inserted above. - self._inc_counter(queue_name, project, amount=len(ids)) - - # NOTE(kgriffs): Finalize the insert once we can say that - # all the messages made it. This makes bulk inserts - # atomic, assuming queries filter out any non-finalized - # messages. - if transaction is not None: - collection.update({'tx': transaction}, - {'$set': {'tx': None}}, - upsert=False, multi=True) - - return [str(id_) for id_ in ids] - - except pymongo.errors.DuplicateKeyError as ex: - # TODO(kgriffs): Record stats of how often retries happen, - # and how many attempts, on average, are required to insert - # messages. - - # NOTE(kgriffs): This can be used in conjunction with the - # log line, above, that is emitted after all messages have - # been posted, to gauge how long it is taking for messages - # to be posted to a given queue, or overall. - # - # TODO(kgriffs): Add transaction ID to help match up loglines - if attempt == 0: - msgtmpl = _(u'First attempt failed while ' - u'adding messages to queue ' - u'"%(queue)s" under project %(project)s') - - LOG.debug(msgtmpl, dict(queue=queue_name, project=project)) - - # NOTE(kgriffs): Never retry past the point that competing - # messages expire and are GC'd, since once they are gone, - # the unique index no longer protects us from getting out - # of order, which could cause an observer to miss this - # message. The code below provides a sanity-check to ensure - # this situation can not happen. - elapsed = timeutils.utcnow_ts() - now - if elapsed > MAX_RETRY_POST_DURATION: - msgtmpl = (u'Exceeded maximum retry duration for queue ' - u'"%(queue)s" under project %(project)s') - - LOG.warning(msgtmpl, - dict(queue=queue_name, project=project)) - break - - # Chill out for a moment to mitigate thrashing/thundering - self._backoff_sleep(attempt) - - # NOTE(kgriffs): Perhaps we failed because a worker crashed - # after inserting messages, but before incrementing the - # counter; that would cause all future requests to stall, - # since they would keep getting the same base marker that is - # conflicting with existing messages, until the messages that - # "won" expire, at which time we would end up reusing markers, - # and that could make some messages invisible to an observer - # that is querying with a marker that is large than the ones - # being reused. - # - # To mitigate this, we apply a heuristic to determine whether - # a counter has stalled. We attempt to increment the counter, - # but only if it hasn't been updated for a few seconds, which - # should mean that nobody is left to update it! 
-                #
-                # Note that we increment one at a time until the logjam is
-                # broken, since we don't know how many messages were posted
-                # by the worker before it crashed.
-                next_marker = self._inc_counter(
-                    queue_name, project, window=COUNTER_STALL_WINDOW)
-
-                # Retry the entire batch with a new sequence of markers.
-                #
-                # NOTE(kgriffs): Due to the unique index, and how
-                # MongoDB works with batch requests, we will never
-                # end up with a partially-successful update. The first
-                # document in the batch will fail to insert, and the
-                # remainder of the documents will not be attempted.
-                if next_marker is None:
-                    # NOTE(kgriffs): Usually we will end up here, since
-                    # it should be rare that a counter becomes stalled.
-                    next_marker = self._get_counter(
-                        queue_name, project)
-                else:
-                    msgtmpl = (u'Detected a stalled message counter '
-                               u'for queue "%(queue)s" under '
-                               u'project %(project)s. '
-                               u'The counter was incremented to %(value)d.')
-
-                    LOG.warning(msgtmpl,
-                                dict(queue=queue_name,
-                                     project=project,
-                                     value=next_marker))
-
-                for index, message in enumerate(prepared_messages):
-                    message['k'] = next_marker + index
-
-            except Exception as ex:
-                LOG.exception(ex)
-                raise
-
-        msgtmpl = (u'Hit maximum number of attempts (%(max)s) for queue '
-                   u'"%(queue)s" under project %(project)s')
-
-        LOG.warning(msgtmpl,
-                    dict(max=self.driver.mongodb_conf.max_attempts,
-                         queue=queue_name,
-                         project=project))
-
-        raise errors.MessageConflict(queue_name, project)
-
-
-def _is_claimed(msg, now):
-    return (msg['c']['id'] is not None and
-            msg['c']['e'] > now)
-
-
-def _basic_message(msg, now):
-    oid = msg['_id']
-    age = now - utils.oid_ts(oid)
-
-    return {
-        'id': str(oid),
-        'age': int(age),
-        'ttl': msg['t'],
-        'body': msg['b'],
-        'claim_id': str(msg['c']['id']) if msg['c']['id'] else None
-    }
-
-
-class MessageQueueHandler(object):
-
-    def __init__(self, driver, control_driver):
-        self.driver = driver
-        self._cache = self.driver.cache
-        self.queue_controller = self.driver.queue_controller
-        self.message_controller = self.driver.message_controller
-
-    def delete(self, queue_name, project=None):
-        self.message_controller._purge_queue(queue_name, project)
-
-    @utils.raises_conn_error
-    @utils.retries_on_autoreconnect
-    def stats(self, name, project=None):
-        if not self.queue_controller.exists(name, project=project):
-            raise errors.QueueDoesNotExist(name, project)
-
-        controller = self.message_controller
-
-        active = controller._count(name, project=project,
-                                   include_claimed=False)
-
-        total = controller._count(name, project=project,
-                                  include_claimed=True)
-
-        message_stats = {
-            'claimed': total - active,
-            'free': active,
-            'total': total,
-        }
-
-        try:
-            oldest = controller.first(name, project=project, sort=1)
-            newest = controller.first(name, project=project, sort=-1)
-        except errors.QueueIsEmpty:
-            pass
-        else:
-            now = timeutils.utcnow_ts()
-            message_stats['oldest'] = utils.stat_message(oldest, now)
-            message_stats['newest'] = utils.stat_message(newest, now)
-
-        return {'messages': message_stats}
-
-
-def _get_scoped_query(name, project):
-    return {'p_q': utils.scope_queue_name(name, project)}
diff --git a/zaqar/storage/mongodb/options.py b/zaqar/storage/mongodb/options.py
deleted file mode 100644
index 74667601..00000000
--- a/zaqar/storage/mongodb/options.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (c) 2013 Rackspace Hosting, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""MongoDB storage driver configuration options."""
-
-from oslo_config import cfg
-
-_deprecated_group = 'drivers:storage:mongodb'
-
-# options that are common to both management and message storage
-_COMMON_OPTIONS = (
-
-    cfg.StrOpt('ssl_keyfile',
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'ssl_keyfile',
-                   group=_deprecated_group), ],
-               help=('The private keyfile used to identify the local '
-                     'connection against mongod. If included with the '
-                     '``certfile``, then only the ``ssl_certfile`` '
-                     'is needed.')),
-
-    cfg.StrOpt('ssl_certfile',
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'ssl_certfile',
-                   group=_deprecated_group), ],
-               help=('The certificate file used to identify the local '
-                     'connection against mongod.')),
-
-    cfg.StrOpt('ssl_cert_reqs', default='CERT_REQUIRED',
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'ssl_cert_reqs',
-                   group=_deprecated_group), ],
-               help=('Specifies whether a certificate is required from '
-                     'the other side of the connection, and whether it '
-                     'will be validated if provided. It must be one of '
-                     'the three values ``CERT_NONE`` (certificates '
-                     'ignored), ``CERT_OPTIONAL`` (not required, but '
-                     'validated if provided), or ``CERT_REQUIRED`` '
-                     '(required and validated). If the value of this '
-                     'parameter is not ``CERT_NONE``, then the '
-                     '``ssl_ca_certs`` parameter must point to a file of '
-                     'CA certificates.')),
-
-    cfg.StrOpt('ssl_ca_certs',
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'ssl_ca_certs',
-                   group=_deprecated_group), ],
-               help=('The ca_certs file contains a set of concatenated '
-                     '"certification authority" certificates, which are '
-                     'used to validate certificates passed from the other '
-                     'end of the connection.')),
-
-    cfg.StrOpt('uri',
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'uri',
-                   group=_deprecated_group), ],
-               help=('MongoDB connection URI. If an SSL connection is '
-                     'enabled, then ``ssl_keyfile``, ``ssl_certfile``, '
-                     '``ssl_cert_reqs``, and ``ssl_ca_certs`` need to be '
-                     'set accordingly.')),
-
-    cfg.StrOpt('database', default='zaqar',
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'database',
-                   group=_deprecated_group), ],
-               help='Database name.'),
-
-    cfg.IntOpt('max_attempts', min=0, default=1000,
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'max_attempts',
-                   group=_deprecated_group), ],
-               help=('Maximum number of times to retry a failed operation.
' - 'Currently only used for retrying a message post.')), - - cfg.FloatOpt('max_retry_sleep', default=0.1, - deprecated_opts=[cfg.DeprecatedOpt( - 'max_retry_sleep', - group=_deprecated_group), ], - help=('Maximum sleep interval between retries ' - '(actual sleep time increases linearly ' - 'according to number of attempts performed).')), - - cfg.FloatOpt('max_retry_jitter', default=0.005, - deprecated_opts=[cfg.DeprecatedOpt( - 'max_retry_jitter', - group=_deprecated_group), ], - help=('Maximum jitter interval, to be added to the ' - 'sleep interval, in order to decrease probability ' - 'that parallel requests will retry at the ' - 'same instant.')), - - cfg.IntOpt('max_reconnect_attempts', default=10, - deprecated_opts=[cfg.DeprecatedOpt( - 'max_reconnect_attempts', - group=_deprecated_group), ], - help=('Maximum number of times to retry an operation that ' - 'failed due to a primary node failover.')), - - cfg.FloatOpt('reconnect_sleep', default=0.020, - deprecated_opts=[cfg.DeprecatedOpt( - 'reconnect_sleep', - group=_deprecated_group), ], - help=('Base sleep interval between attempts to reconnect ' - 'after a primary node failover. ' - 'The actual sleep time increases exponentially (power ' - 'of 2) each time the operation is retried.')), -) - -MANAGEMENT_MONGODB_OPTIONS = _COMMON_OPTIONS -MESSAGE_MONGODB_OPTIONS = _COMMON_OPTIONS + ( - # options used only by message_store - cfg.IntOpt('partitions', default=2, - deprecated_opts=[cfg.DeprecatedOpt( - 'partitions', - group=_deprecated_group), ], - help=('Number of databases across which to ' - 'partition message data, in order to ' - 'reduce writer lock %. DO NOT change ' - 'this setting after initial deployment. ' - 'It MUST remain static. Also, you ' - 'should not need a large number of partitions ' - 'to improve performance, esp. if deploying ' - 'MongoDB on SSD storage.')), -) - -MANAGEMENT_MONGODB_GROUP = 'drivers:management_store:mongodb' -MESSAGE_MONGODB_GROUP = 'drivers:message_store:mongodb' - - -def _config_options(): - """Used by config generators. - - Returns a list of (group-name, oslo_config-options) tuples - for management and message storage. - """ - return [(MANAGEMENT_MONGODB_GROUP, MANAGEMENT_MONGODB_OPTIONS), - (MESSAGE_MONGODB_GROUP, MESSAGE_MONGODB_OPTIONS)] diff --git a/zaqar/storage/mongodb/pools.py b/zaqar/storage/mongodb/pools.py deleted file mode 100644 index a7270c6c..00000000 --- a/zaqar/storage/mongodb/pools.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -"""pools: an implementation of the pool management storage -controller for mongodb. 
- -Schema: - 'n': name :: six.text_type - 'u': uri :: six.text_type - 'w': weight :: int - 'o': options :: dict -""" - -import functools -from pymongo import errors as mongo_error - -from zaqar.common import utils as common_utils -from zaqar.storage import base -from zaqar.storage import errors -from zaqar.storage.mongodb import utils - -POOLS_INDEX = [ - ('n', 1) -] - -URI_INDEX = [ - ('u', 1) -] - -# NOTE(cpp-cabrera): used for get/list operations. There's no need to -# show the marker or the _id - they're implementation details. -OMIT_FIELDS = (('_id', False),) - - -def _field_spec(detailed=False): - return dict(OMIT_FIELDS + (() if detailed else (('o', False),))) - - -class PoolsController(base.PoolsBase): - - def __init__(self, *args, **kwargs): - super(PoolsController, self).__init__(*args, **kwargs) - - self._col = self.driver.database.pools - self._col.ensure_index(POOLS_INDEX, - background=True, - name='pools_name', - unique=True) - - self._col.ensure_index(URI_INDEX, - background=True, - name='pools_uri', - unique=True) - - @utils.raises_conn_error - def _list(self, marker=None, limit=10, detailed=False): - query = {} - if marker is not None: - query['n'] = {'$gt': marker} - - cursor = self._col.find(query, projection=_field_spec(detailed), - limit=limit).sort('n') - marker_name = {} - - def normalizer(pool): - marker_name['next'] = pool['n'] - return _normalize(pool, detailed=detailed) - - yield utils.HookedCursor(cursor, normalizer) - yield marker_name and marker_name['next'] - - @utils.raises_conn_error - def _get(self, name, detailed=False): - res = self._col.find_one({'n': name}, - _field_spec(detailed)) - if not res: - raise errors.PoolDoesNotExist(name) - - return _normalize(res, detailed) - - @utils.raises_conn_error - def _get_pools_by_group(self, group=None, detailed=False): - cursor = self._col.find({'g': group}, projection=_field_spec(detailed)) - normalizer = functools.partial(_normalize, detailed=detailed) - return utils.HookedCursor(cursor, normalizer) - - @utils.raises_conn_error - def _create(self, name, weight, uri, group=None, options=None): - options = {} if options is None else options - try: - self._col.update({'n': name}, - {'$set': {'n': name, - 'w': weight, - 'u': uri, - 'g': group, - 'o': options}}, - upsert=True) - except mongo_error.DuplicateKeyError: - raise errors.PoolAlreadyExists() - - @utils.raises_conn_error - def _exists(self, name): - return self._col.find_one({'n': name}) is not None - - @utils.raises_conn_error - def _update(self, name, **kwargs): - names = ('uri', 'weight', 'group', 'options') - fields = common_utils.fields(kwargs, names, - pred=lambda x: x is not None, - key_transform=lambda x: x[0]) - assert fields, ('`weight`, `uri`, `group`, ' - 'or `options` not found in kwargs') - - res = self._col.update({'n': name}, - {'$set': fields}, - upsert=False) - if not res['updatedExisting']: - raise errors.PoolDoesNotExist(name) - - @utils.raises_conn_error - def _delete(self, name): - # NOTE(wpf): Initializing the Flavors controller here instead of - # doing so in __init__ is required to avoid falling in a maximum - # recursion error. - try: - pool = self.get(name) - pools_group = self.get_pools_by_group(pool['group']) - flavor_ctl = self.driver.flavors_controller - res = list(flavor_ctl._list_by_pool_group(pool['group'])) - - # NOTE(flaper87): If this is the only pool in the - # group and it's being used by a flavor, don't allow - # it to be deleted. 
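
The common_utils.fields() call in _update() above maps long kwarg names onto their single-letter storage aliases by taking the first letter. The same idea as a tiny sketch (hypothetical helper, not the shared utility itself):

    def build_update_fields(**kwargs):
        # 'weight' -> 'w', 'uri' -> 'u', 'group' -> 'g', 'options' -> 'o';
        # None values mean "not being updated" and are dropped.
        return {name[0]: value
                for name, value in kwargs.items()
                if value is not None}
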
- if res and len(pools_group) == 1: - flavors = ', '.join([x['name'] for x in res]) - raise errors.PoolInUseByFlavor(name, flavors) - - self._col.delete_one({'n': name}) - except errors.PoolDoesNotExist: - pass - - @utils.raises_conn_error - def _drop_all(self): - self._col.drop() - self._col.ensure_index(POOLS_INDEX, unique=True) - - -def _normalize(pool, detailed=False): - ret = { - 'name': pool['n'], - 'group': pool['g'], - 'uri': pool['u'], - 'weight': pool['w'], - } - if detailed: - ret['options'] = pool['o'] - - return ret diff --git a/zaqar/storage/mongodb/queues.py b/zaqar/storage/mongodb/queues.py deleted file mode 100644 index 4a0c0519..00000000 --- a/zaqar/storage/mongodb/queues.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implements the MongoDB storage controller for queues. - -Field Mappings: - In order to reduce the disk / memory space used, - field names will be, most of the time, the first - letter of their long name. -""" - -from oslo_log import log as logging -from oslo_utils import timeutils -import pymongo.errors - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage.mongodb import utils - -LOG = logging.getLogger(__name__) - -# NOTE(kgriffs): E.g.: 'queuecontroller:exists:5083853/my-queue' -_QUEUE_CACHE_PREFIX = 'queuecontroller:' - -# NOTE(kgriffs): This causes some race conditions, but they are -# harmless. If a queue was deleted, but we are still returning -# that it exists, some messages may get inserted without the -# client getting an error. In this case, those messages would -# be orphaned and expire eventually according to their TTL. -# -# What this means for the client is that they have a bug; they -# deleted a queue and then immediately tried to post messages -# to it. If they keep trying to use the queue, they will -# eventually start getting an error, once the cache entry -# expires, which should clue them in on what happened. -# -# TODO(kgriffs): Make dynamic? -_QUEUE_CACHE_TTL = 5 - - -def _queue_exists_key(queue, project=None): - # NOTE(kgriffs): Use string concatenation for performance, - # also put project first since it is guaranteed to be - # unique, which should reduce lookup time. - return _QUEUE_CACHE_PREFIX + 'exists:' + str(project) + '/' + queue - - -class QueueController(storage.Queue): - """Implements queue resource operations using MongoDB. - - Queues are scoped by project, which is prefixed to the - queue name. - - :: - - Queues: - - Name Field - --------------------- - name -> p_q - msg counter -> c - metadata -> m - - Message Counter: - - Name Field - ------------------- - value -> v - modified ts -> t - """ - - def __init__(self, *args, **kwargs): - super(QueueController, self).__init__(*args, **kwargs) - - self._cache = self.driver.cache - self._collection = self.driver.queues_database.queues - - # NOTE(flaper87): This creates a unique index for - # project and name. 
Using project as the prefix - # allows for querying by project and project+name. - # This is also useful for retrieving the queues list for - # a specific project, for example. Order matters! - self._collection.ensure_index([('p_q', 1)], unique=True) - - # ---------------------------------------------------------------------- - # Helpers - # ---------------------------------------------------------------------- - - def _get_counter(self, name, project=None): - """Retrieves the current message counter value for a given queue. - - This helper is used to generate monotonic pagination - markers that are saved as part of the message - document. - - Note 1: Markers are scoped per-queue and so are *not* - globally unique or globally ordered. - - Note 2: If two or more requests to this method are made - in parallel, this method will return the same counter - value. This is done intentionally so that the caller - can detect a parallel message post, allowing it to - mitigate race conditions between producer and - observer clients. - - :param name: Name of the queue to which the counter is scoped - :param project: Queue's project - :returns: current message counter as an integer - """ - - doc = self._collection.find_one(_get_scoped_query(name, project), - projection={'c.v': 1, '_id': 0}) - - if doc is None: - raise errors.QueueDoesNotExist(name, project) - - return doc['c']['v'] - - def _inc_counter(self, name, project=None, amount=1, window=None): - """Increments the message counter and returns the new value. - - :param name: Name of the queue to which the counter is scoped - :param project: Queue's project name - :param amount: (Default 1) Amount by which to increment the counter - :param window: (Default None) A time window, in seconds, that - must have elapsed since the counter was last updated, in - order to increment the counter. - - :returns: Updated message counter value, or None if window - was specified, and the counter has already been updated - within the specified time period. - - :raises QueueDoesNotExist: if not found - """ - now = timeutils.utcnow_ts() - - update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}} - query = _get_scoped_query(name, project) - if window is not None: - threshold = now - window - query['c.t'] = {'$lt': threshold} - - while True: - try: - doc = self._collection.find_and_modify( - query, update, new=True, projection={'c.v': 1, '_id': 0}) - - break - except pymongo.errors.AutoReconnect as ex: - LOG.exception(ex) - - if doc is None: - if window is None: - # NOTE(kgriffs): Since we did not filter by a time window, - # the queue should have been found and updated. Perhaps - # the queue has been deleted? - message = _(u'Failed to increment the message ' - u'counter for queue %(name)s and ' - u'project %(project)s') - message %= dict(name=name, project=project) - - LOG.warning(message) - - raise errors.QueueDoesNotExist(name, project) - - # NOTE(kgriffs): Assume the queue existed, but the counter - # was recently updated, causing the range query on 'c.t' to - # exclude the record. 
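# --- Illustrative sketch (editor's example; not part of the original
# queues.py) --- An in-memory model of the _inc_counter() window
# contract documented above: the increment is skipped (and None is
# returned) when the counter was already updated within the last
# `window` seconds, which is how a caller detects a parallel post.

import time as _time

_counter = {'v': 1, 't': 0}  # mirrors the 'c' sub-document

def _inc(amount=1, window=None):
    now = int(_time.time())
    if window is not None and _counter['t'] >= now - window:
        return None  # updated too recently; caller falls back to a read
    _counter['v'] += amount
    _counter['t'] = now
    return _counter['v']

assert _inc() == 2
assert _inc(window=60) is None  # a parallel post already bumped it
# --- end sketch ---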
- return None - - return doc['c']['v'] - - # ---------------------------------------------------------------------- - # Interface - # ---------------------------------------------------------------------- - - def _get(self, name, project=None): - try: - return self.get_metadata(name, project) - except errors.QueueDoesNotExist: - return {} - - def _list(self, project=None, marker=None, - limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False): - - query = utils.scoped_query(marker, project) - - projection = {'p_q': 1, '_id': 0} - if detailed: - projection['m'] = 1 - - cursor = self._collection.find(query, projection=projection) - cursor = cursor.limit(limit).sort('p_q') - marker_name = {} - - def normalizer(record): - queue = {'name': utils.descope_queue_name(record['p_q'])} - marker_name['next'] = queue['name'] - if detailed: - queue['metadata'] = record['m'] - return queue - - yield utils.HookedCursor(cursor, normalizer) - yield marker_name and marker_name['next'] - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def get_metadata(self, name, project=None): - queue = self._collection.find_one(_get_scoped_query(name, project), - projection={'m': 1, '_id': 0}) - if queue is None: - raise errors.QueueDoesNotExist(name, project) - - return queue.get('m', {}) - - @utils.raises_conn_error - # @utils.retries_on_autoreconnect - def _create(self, name, metadata=None, project=None): - # NOTE(flaper87): If the connection fails after it was called - # and we retry to insert the queue, we could end up returning - # `False` because of the `DuplicatedKeyError` although the - # queue was indeed created by this API call. - # - # TODO(kgriffs): Commented out `retries_on_autoreconnect` for - # now due to the above issue, since creating a queue is less - # important to make super HA. - - try: - # NOTE(kgriffs): Start counting at 1, and assume the first - # message ever posted will succeed and set t to a UNIX - # "modified at" timestamp. - counter = {'v': 1, 't': 0} - - scoped_name = utils.scope_queue_name(name, project) - self._collection.insert({'p_q': scoped_name, 'm': metadata or {}, - 'c': counter}) - - except pymongo.errors.DuplicateKeyError: - return False - else: - return True - - # NOTE(kgriffs): Only cache when it exists; if it doesn't exist, and - # someone creates it, we want it to be immediately visible. 
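# --- Illustrative sketch (editor's example; not part of the original
# queues.py) --- A miniature model of the caching behavior described in
# the NOTE above: only positive existence results are cached, and only
# for a short TTL, so a queue created right after a miss still becomes
# visible immediately. The key format loosely follows _queue_exists_key.

import time as _time

_exists_cache = {}

def _cached_exists(key, lookup, ttl=5):
    hit = _exists_cache.get(key)
    if hit is not None and hit > _time.time():
        return True
    if lookup():
        _exists_cache[key] = _time.time() + ttl  # cache only when found
        return True
    return False

assert _cached_exists('5083853/my-queue', lambda: True)
assert _cached_exists('5083853/my-queue', lambda: False)  # cache hit
# --- end sketch ---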
- @utils.raises_conn_error - @utils.retries_on_autoreconnect - @decorators.caches(_queue_exists_key, _QUEUE_CACHE_TTL, lambda v: v) - def _exists(self, name, project=None): - query = _get_scoped_query(name, project) - return self._collection.find_one(query) is not None - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def set_metadata(self, name, metadata, project=None): - rst = self._collection.update(_get_scoped_query(name, project), - {'$set': {'m': metadata}}, - multi=False, - manipulate=False) - - if not rst['updatedExisting']: - raise errors.QueueDoesNotExist(name, project) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - @_exists.purges - def _delete(self, name, project=None): - self._collection.delete_one(_get_scoped_query(name, project)) - - @utils.raises_conn_error - @utils.retries_on_autoreconnect - def _stats(self, name, project=None): - pass - - -def _get_scoped_query(name, project): - return {'p_q': utils.scope_queue_name(name, project)} diff --git a/zaqar/storage/mongodb/subscriptions.py b/zaqar/storage/mongodb/subscriptions.py deleted file mode 100644 index d6b3078e..00000000 --- a/zaqar/storage/mongodb/subscriptions.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2014 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -import datetime - -from oslo_utils import timeutils -import pymongo.errors - -from zaqar.common import utils as common_utils -from zaqar import storage -from zaqar.storage import base -from zaqar.storage import errors -from zaqar.storage.mongodb import utils - -ID_INDEX_FIELDS = [('_id', 1)] - -SUBSCRIPTIONS_INDEX = [ - ('s', 1), - ('u', 1), - ('p', 1), -] - -# For removing expired subscriptions -TTL_INDEX_FIELDS = [ - ('e', 1), -] - - -class SubscriptionController(base.Subscription): - """Implements subscription resource operations using MongoDB. - - Subscriptions are unique by project + queue/topic + subscriber. - - Schema: - 's': source :: six.text_type - 'u': subscriber:: six.text_type - 't': ttl:: int - 'e': expires: datetime.datetime - 'o': options :: dict - 'p': project :: six.text_type - 'c': confirmed :: boolean - """ - - def __init__(self, *args, **kwargs): - super(SubscriptionController, self).__init__(*args, **kwargs) - self._collection = self.driver.subscriptions_database.subscriptions - self._collection.ensure_index(SUBSCRIPTIONS_INDEX, unique=True) - # NOTE(flwang): MongoDB will automatically delete the subscription - # from the subscriptions collection when the subscription's 'e' value - # is older than the number of seconds specified in expireAfterSeconds, - # i.e. 0 seconds older in this case. As such, the data expires at the - # specified 'e' value. 
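# --- Illustrative sketch (editor's example; not part of the original
# subscriptions.py) --- The TTL-index pattern described in the NOTE
# above, shown on a bare pymongo collection (assumes a local mongod and
# pymongo 3.x; the database name and field values are hypothetical).
# With expireAfterSeconds=0, MongoDB removes each document as soon as
# the wall clock passes the datetime stored in its 'e' field.

import datetime

import pymongo

_coll = pymongo.MongoClient().zaqar_demo.subscriptions
_coll.create_index([('e', 1)], expireAfterSeconds=0)

_ttl = 300
_coll.insert_one({
    'u': 'http://example.com/webhook',
    't': _ttl,
    'e': datetime.datetime.utcnow() + datetime.timedelta(seconds=_ttl),
})
# --- end sketch ---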
- self._collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', - expireAfterSeconds=0, - background=True) - - @utils.raises_conn_error - def list(self, queue, project=None, marker=None, - limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE): - query = {'s': queue, 'p': project} - if marker is not None: - query['_id'] = {'$gt': utils.to_oid(marker)} - - projection = {'s': 1, 'u': 1, 't': 1, 'p': 1, 'o': 1, '_id': 1, 'c': 1} - - cursor = self._collection.find(query, projection=projection) - cursor = cursor.limit(limit).sort('_id') - marker_name = {} - - now = timeutils.utcnow_ts() - - def normalizer(record): - marker_name['next'] = record['_id'] - - return _basic_subscription(record, now) - - yield utils.HookedCursor(cursor, normalizer) - yield marker_name and marker_name['next'] - - @utils.raises_conn_error - def get(self, queue, subscription_id, project=None): - res = self._collection.find_one({'_id': utils.to_oid(subscription_id), - 'p': project, - 's': queue}) - - if not res: - raise errors.SubscriptionDoesNotExist(subscription_id) - - now = timeutils.utcnow_ts() - return _basic_subscription(res, now) - - @utils.raises_conn_error - def create(self, queue, subscriber, ttl, options, project=None): - source = queue - now = timeutils.utcnow_ts() - now_dt = datetime.datetime.utcfromtimestamp(now) - expires = now_dt + datetime.timedelta(seconds=ttl) - confirmed = False - - try: - subscription_id = self._collection.insert({'s': source, - 'u': subscriber, - 't': ttl, - 'e': expires, - 'o': options, - 'p': project, - 'c': confirmed}) - return subscription_id - except pymongo.errors.DuplicateKeyError: - return None - - @utils.raises_conn_error - def exists(self, queue, subscription_id, project=None): - return self._collection.find_one({'_id': utils.to_oid(subscription_id), - 'p': project}) is not None - - @utils.raises_conn_error - def update(self, queue, subscription_id, project=None, **kwargs): - names = ('subscriber', 'ttl', 'options') - key_transform = lambda x: 'u' if x == 'subscriber' else x[0] - fields = common_utils.fields(kwargs, names, - pred=lambda x: x is not None, - key_transform=key_transform) - assert fields, ('`subscriber`, `ttl`, ' - 'or `options` not found in kwargs') - - new_ttl = fields.get('t') - if new_ttl is not None: - now = timeutils.utcnow_ts() - now_dt = datetime.datetime.utcfromtimestamp(now) - expires = now_dt + datetime.timedelta(seconds=new_ttl) - fields['e'] = expires - - try: - res = self._collection.update( - {'_id': utils.to_oid(subscription_id), - 'p': project, - 's': queue}, - {'$set': fields}, - upsert=False) - except pymongo.errors.DuplicateKeyError: - raise errors.SubscriptionAlreadyExists() - if not res['updatedExisting']: - raise errors.SubscriptionDoesNotExist(subscription_id) - - @utils.raises_conn_error - def delete(self, queue, subscription_id, project=None): - self._collection.delete_one({'_id': utils.to_oid(subscription_id), - 'p': project, - 's': queue}) - - @utils.raises_conn_error - def get_with_subscriber(self, queue, subscriber, project=None): - res = self._collection.find_one({'u': subscriber, - 'p': project}) - now = timeutils.utcnow_ts() - return _basic_subscription(res, now) - - @utils.raises_conn_error - def confirm(self, queue, subscription_id, project=None, confirmed=True): - - res = self._collection.update({'_id': utils.to_oid(subscription_id), - 'p': project}, - {'$set': {'c': confirmed}}, - upsert=False) - if not res['updatedExisting']: - raise errors.SubscriptionDoesNotExist(subscription_id) - - -def _basic_subscription(record, now): - # 
NOTE(Eva-i): The record's field 'e' (expires), which is unused here,
-    # changed its format from int (timestamp) to datetime in patch
-    # 1d122b1671792aff0055ed5396111cd441fb8269. Any future change that
-    # starts using the 'e' field should make sure to support both
-    # formats.
-    oid = record['_id']
-    age = now - utils.oid_ts(oid)
-    confirmed = record.get('c', True)
-    return {
-        'id': str(oid),
-        'source': record['s'],
-        'subscriber': record['u'],
-        'ttl': record['t'],
-        'age': int(age),
-        'options': record['o'],
-        'confirmed': confirmed,
-    }
diff --git a/zaqar/storage/mongodb/utils.py b/zaqar/storage/mongodb/utils.py
deleted file mode 100644
index 1a552280..00000000
--- a/zaqar/storage/mongodb/utils.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright (c) 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import division
-import binascii
-import collections
-import datetime
-import functools
-import random
-import time
-
-from bson import errors as berrors
-from bson import objectid
-from bson import tz_util
-from oslo_log import log as logging
-from oslo_utils import timeutils
-from pymongo import errors
-
-from zaqar.storage import errors as storage_errors
-
-
-# BSON ObjectId gives TZ-aware datetime, so we generate a
-# TZ-aware UNIX epoch for convenience.
-EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=tz_util.utc)
-
-# NOTE(cpp-cabrera): the authoritative form of project/queue keys.
-PROJ_QUEUE_KEY = 'p_q'
-
-LOG = logging.getLogger(__name__)
-
-
-def cached_gen(iterable):
-    """Converts the iterable into a caching generator.
-
-    Returns a proxy that yields each item of iterable, while at
-    the same time caching those items in a deque.
-
-    :param iterable: an iterable to wrap in a caching generator
-
-    :returns: (proxy(iterable), cached_items)
-    """
-    cached_items = collections.deque()
-
-    def generator(iterable):
-        for item in iterable:
-            cached_items.append(item)
-            yield item
-
-    return generator(iterable), cached_items
-
-
-def calculate_backoff(attempt, max_attempts, max_sleep, max_jitter=0):
-    """Calculates backoff time, in seconds, when retrying an operation.
-
-    This function calculates a simple linear backoff time with
-    optional jitter, useful for retrying a request under high
-    concurrency.
-
-    The result may be passed directly into time.sleep() in order to
-    mitigate stampeding herd syndrome and introduce backpressure towards
-    the clients, slowing them down.
-
-    :param attempt: current value of the attempt counter (zero-based)
-    :param max_attempts: maximum number of attempts that will be tried
-    :param max_sleep: maximum sleep value to apply before jitter, assumed
-        to be seconds. Fractional seconds are supported to 1 ms
-        granularity.
-    :param max_jitter: maximum jitter value to add to the baseline sleep
-        time. Actual value will be chosen randomly.
-
-    :raises ValueError: if a parameter is invalid
-    :returns: float representing the number of seconds to sleep, within
-        the interval [0, max_sleep), determined linearly according to
-        the ratio attempt / max_attempts, with optional jitter.
-    """
-
-    if max_sleep < 0:
-        raise ValueError(u'max_sleep must be >= 0')
-
-    if max_jitter < 0:
-        raise ValueError(u'max_jitter must be >= 0')
-
-    if not (0 <= attempt < max_attempts):
-        raise ValueError(u'attempt value is out of range')
-
-    ratio = attempt / max_attempts
-    backoff_sec = ratio * max_sleep
-    jitter_sec = random.random() * max_jitter
-
-    return backoff_sec + jitter_sec
-
-
-def to_oid(obj):
-    """Creates a new ObjectId based on the input.
-
-    Returns None when TypeError or berrors.InvalidId
-    is raised by the ObjectId class.
-
-    :param obj: Anything that can be passed as an
-        input to `objectid.ObjectId`
-    """
-    try:
-        return objectid.ObjectId(obj)
-    except (TypeError, berrors.InvalidId):
-        return None
-
-
-def oid_ts(oid):
-    """Converts an ObjectId to a UNIX timestamp.
-
-    :raises TypeError: if oid isn't an ObjectId
-    """
-    try:
-        return timeutils.delta_seconds(EPOCH, oid.generation_time)
-    except AttributeError:
-        raise TypeError(u'Expected ObjectId and got %s' % type(oid))
-
-
-def stat_message(message, now):
-    """Creates a stat document from the given message, relative to now."""
-    msg_id = message['id']
-    created = oid_ts(to_oid(msg_id))
-    age = now - created
-
-    return {
-        'id': msg_id,
-        'age': int(age),
-        'created': timeutils.iso8601_from_timestamp(created),
-    }
-
-
-def normalize_none_str(string_or_none):
-    """Returns '' IFF given value is None, passthrough otherwise.
-
-    This function normalizes None to the empty string to facilitate
-    string concatenation when a variable could be None.
-    """
-    return '' if string_or_none is None else string_or_none
-
-
-def scope_queue_name(queue=None, project=None):
-    """Returns a scoped name for a queue based on project and queue.
-
-    If only the project name is specified, a scope signifying "all queues"
-    for that project is returned. If neither queue nor project are
-    specified, a scope for "all global queues" is returned, which
-    is to be interpreted as excluding queues scoped by project.
-
-    :returns: '{project}/{queue}' if project and queue are given,
-        '{project}/' if ONLY project is given, '/{queue}' if ONLY
-        queue is given, and '/' if neither are given.
-    """
-
-    # NOTE(kgriffs): Concatenation is faster than format, and
-    # put project first since it is guaranteed to be unique.
-    return normalize_none_str(project) + '/' + normalize_none_str(queue)
-
-
-def descope_queue_name(scoped_name):
-    """Returns the unscoped queue name, given a fully-scoped name."""
-
-    # NOTE(kgriffs): scoped_name can be either '/', '/global-queue-name',
-    # or 'project-id/queue-name'.
-    return scoped_name.partition('/')[2] or None
-
-
-def parse_scoped_project_queue(scoped_name):
-    """Returns the project and queue name for a scoped catalogue entry.
-
-    :param scoped_name: a project/queue as given by :scope_queue_name:
-    :type scoped_name: six.text_type
-    :returns: (project, queue)
-    :rtype: (six.text_type, six.text_type)
-    """
-    return scoped_name.split('/')
-
-
-def scoped_query(queue, project):
-    """Returns a dict usable for querying for scoped project/queues.
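# --- Illustrative sketch (editor's example; not part of the original
# utils.py) --- Typical consumption of calculate_backoff() above: step
# the attempt counter through its range, sleeping a little longer after
# each failure. The operation and numeric values are placeholders; they
# echo the max_retry_sleep/max_retry_jitter defaults from options.py
# earlier in this patch.

def _retrying(operation, max_attempts=10):
    for attempt in range(max_attempts):
        try:
            return operation()
        except Exception:
            time.sleep(calculate_backoff(attempt, max_attempts,
                                         max_sleep=0.1, max_jitter=0.005))
    raise RuntimeError('gave up after %d attempts' % max_attempts)
# --- end sketch ---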
- - :param queue: name of queue to seek - :type queue: six.text_type - :param project: namespace - :type project: six.text_type - :param key: query key to use - :type key: six.text_type - :returns: query to issue - :rtype: dict - """ - key = PROJ_QUEUE_KEY - query = {} - scoped_name = scope_queue_name(queue, project) - - if not scoped_name.startswith('/'): - # NOTE(kgriffs): scoped queue, e.g., 'project-id/queue-name' - project_prefix = '^' + project + '/' - query[key] = {'$regex': project_prefix, '$gt': scoped_name} - elif scoped_name == '/': - # NOTE(kgriffs): list global queues, but exclude scoped ones - query[key] = {'$regex': '^/'} - else: - # NOTE(kgriffs): unscoped queue, e.g., '/my-global-queue' - query[key] = {'$regex': '^/', '$gt': scoped_name} - - return query - - -def get_partition(num_partitions, queue, project=None): - """Get the partition number for a given queue and project. - - Hashes the queue to a partition number. The hash is stable, - meaning given the same queue name and project ID, the same - partition number will always be returned. Note also that - queues will be uniformly distributed across partitions. - - The number of partitions is taken from the "partitions" - property in the config file, under the [drivers:storage:mongodb] - section. - """ - - name = project + queue if project is not None else queue - - # NOTE(kgriffs): For small numbers of partitions, crc32 will - # provide a uniform distribution. This was verified experimentally - # with up to 100 partitions. - return binascii.crc32(name.encode('utf-8')) % num_partitions - - -def raises_conn_error(func): - """Handles the MongoDB ConnectionFailure error. - - This decorator catches MongoDB's ConnectionFailure - error and raises Zaqar's ConnectionError instead. - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except errors.ConnectionFailure as ex: - LOG.exception(ex) - raise storage_errors.ConnectionError() - - return wrapper - - -def retries_on_autoreconnect(func): - """Causes the wrapped function to be re-called on AutoReconnect. - - This decorator catches MongoDB's AutoReconnect error and retries - the function call. - - .. Note:: - Assumes that the decorated function has defined self.driver.mongodb_conf - so that `max_reconnect_attempts` and `reconnect_sleep` can be taken - into account. - - .. Warning:: The decorated function must be idempotent. 
- """ - - @functools.wraps(func) - def wrapper(self, *args, **kwargs): - # TODO(kgriffs): Figure out a way to not have to rely on the - # presence of `mongodb_conf` - max_attemps = self.driver.mongodb_conf.max_reconnect_attempts - sleep_sec = self.driver.mongodb_conf.reconnect_sleep - - last_ex = None - for attempt in range(max_attemps): - try: - return func(self, *args, **kwargs) - break - - except errors.AutoReconnect as ex: - LOG.warning(u'Caught AutoReconnect, retrying the ' - 'call to {0}'.format(func)) - - last_ex = ex - time.sleep(sleep_sec * (2 ** attempt)) - else: - LOG.error(u'Caught AutoReconnect, maximum attempts ' - 'to {0} exceeded.'.format(func)) - - raise last_ex - - return wrapper - - -class HookedCursor(object): - - def __init__(self, cursor, denormalizer): - self.cursor = cursor - self.denormalizer = denormalizer - - def __getattr__(self, attr): - return getattr(self.cursor, attr) - - def __iter__(self): - return self - - def __len__(self): - return self.cursor.count(True) - - @raises_conn_error - def next(self): - item = next(self.cursor) - return self.denormalizer(item) - - def __next__(self): - return self.next() diff --git a/zaqar/storage/pipeline.py b/zaqar/storage/pipeline.py deleted file mode 100644 index 4d3314d7..00000000 --- a/zaqar/storage/pipeline.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -from oslo_config import cfg -from oslo_log import log as logging -from osprofiler import profiler -from stevedore import driver -from stevedore import extension - -from zaqar import common -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.storage import base - -LOG = logging.getLogger(__name__) - -_PIPELINE_RESOURCES = ('queue', 'message', 'claim', 'subscription') - -_PIPELINE_CONFIGS = tuple(( - cfg.ListOpt(resource + '_pipeline', default=[], - help=_('Pipeline to use for processing {0} operations. ' - 'This pipeline will be consumed before calling ' - 'the storage driver\'s controller methods.') - .format(resource)) - for resource in _PIPELINE_RESOURCES -)) - -_PIPELINE_GROUP = 'storage' - - -def _config_options(): - return [(_PIPELINE_GROUP, _PIPELINE_CONFIGS)] - - -def _get_storage_pipeline(resource_name, conf, *args, **kwargs): - """Constructs and returns a storage resource pipeline. - - This is a helper function for any service supporting - pipelines for the storage layer. The function returns - a pipeline based on the `{resource_name}_pipeline` - config option. - - Stages in the pipeline implement controller methods - that they want to hook. A stage can halt the - pipeline immediate by returning a value that is - not None; otherwise, processing will continue - to the next stage, ending with the actual storage - controller. - - :param conf: Configuration instance. - :type conf: `cfg.ConfigOpts` - - :returns: A pipeline to use. 
- :rtype: `Pipeline` - """ - conf.register_opts(_PIPELINE_CONFIGS, - group=_PIPELINE_GROUP) - - storage_conf = conf[_PIPELINE_GROUP] - - pipeline = [] - for ns in storage_conf[resource_name + '_pipeline']: - try: - mgr = driver.DriverManager('zaqar.storage.stages', - ns, - invoke_args=args, - invoke_kwds=kwargs, - invoke_on_load=True) - pipeline.append(mgr.driver) - except RuntimeError as exc: - LOG.warning(u'Stage %(stage)s could not be imported: %(ex)s', - {'stage': ns, 'ex': str(exc)}) - continue - - return pipeline - - -def _get_builtin_entry_points(resource_name, storage, control_driver, conf): - # Load builtin stages - builtin_entry_points = [] - - # NOTE(flaper87): The namespace will look like: - # `zaqar.storage.$STORAGE.driver.stages`. For now, - # the builtin stages are bound to a single store and - # are not applied to every store. - namespace = '%s.%s.stages' % (storage.__module__, resource_name) - extensions = extension.ExtensionManager(namespace, - invoke_on_load=True, - invoke_args=[storage, - control_driver]) - - if len(extensions.extensions) == 0: - return [] - - for ext in extensions.extensions: - builtin_entry_points.append(ext.obj) - if conf.profiler.enabled and conf.profiler.trace_message_store: - return (profiler.trace_cls("stages_controller") - (builtin_entry_points)) - return builtin_entry_points - - -class DataDriver(base.DataDriverBase): - """Meta-driver for injecting pipelines in front of controllers. - - :param conf: Configuration from which to load pipeline settings - :param storage: Storage driver that will service requests as the - last step in the pipeline - """ - - def __init__(self, conf, storage, control_driver): - # NOTE(kgriffs): Pass None for cache since it won't ever - # be referenced. - super(DataDriver, self).__init__(conf, None, control_driver) - self._storage = storage - - @property - def capabilities(self): - return self._storage.capabilities() - - def close(self): - self._storage.close() - - def is_alive(self): - return self._storage.is_alive() - - def _health(self): - return self._storage._health() - - @decorators.lazy_property(write=False) - def queue_controller(self): - stages = _get_builtin_entry_points('queue', self._storage, - self.control_driver, self.conf) - stages.extend(_get_storage_pipeline('queue', self.conf)) - stages.append(self._storage.queue_controller) - return common.Pipeline(stages) - - @decorators.lazy_property(write=False) - def message_controller(self): - stages = _get_builtin_entry_points('message', self._storage, - self.control_driver, self.conf) - kwargs = {'subscription_controller': - self._storage.subscription_controller, - 'max_notifier_workers': - self.conf.notification.max_notifier_workers, - 'require_confirmation': - self.conf.notification.require_confirmation} - stages.extend(_get_storage_pipeline('message', self.conf, **kwargs)) - stages.append(self._storage.message_controller) - return common.Pipeline(stages) - - @decorators.lazy_property(write=False) - def claim_controller(self): - stages = _get_builtin_entry_points('claim', self._storage, - self.control_driver, self.conf) - stages.extend(_get_storage_pipeline('claim', self.conf)) - stages.append(self._storage.claim_controller) - return common.Pipeline(stages) - - @decorators.lazy_property(write=False) - def subscription_controller(self): - stages = _get_builtin_entry_points('subscription', self._storage, - self.control_driver, self.conf) - stages.extend(_get_storage_pipeline('subscription', self.conf)) - stages.append(self._storage.subscription_controller) - 
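# --- Illustrative sketch (editor's example; not part of the original
# pipeline.py) --- What a stage in one of these pipelines looks like
# from the outside. A stage class (registered under the
# 'zaqar.storage.stages' entry point and listed in, e.g.,
# message_pipeline under [storage]) hooks the controller methods it
# cares about; returning anything other than None halts the pipeline
# before the storage controller is reached. The stage below is
# hypothetical.

class DropEmptyPostsStage(object):

    def post(self, queue, messages, client_uuid, project=None):
        if not messages:
            return []    # non-None: short-circuit, skip later stages
        return None      # None: fall through to the next stage
# --- end sketch ---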
return common.Pipeline(stages) diff --git a/zaqar/storage/pooling.py b/zaqar/storage/pooling.py deleted file mode 100644 index c3800bd0..00000000 --- a/zaqar/storage/pooling.py +++ /dev/null @@ -1,687 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# Copyright 2014 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import heapq -import itertools - -from oslo_config import cfg -from oslo_log import log -from osprofiler import profiler - -from zaqar.common import decorators -from zaqar.common import errors as cerrors -from zaqar.common.storage import select -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage import pipeline -from zaqar.storage import utils - -LOG = log.getLogger(__name__) - -_CATALOG_OPTIONS = ( - cfg.BoolOpt('enable_virtual_pool', default=False, - help=('If enabled, the message_store will be used ' - 'as the storage for the virtual pool.')), -) - -_CATALOG_GROUP = 'pooling:catalog' - -# NOTE(kgriffs): E.g.: 'zaqar-pooling:5083853/my-queue' -_POOL_CACHE_PREFIX = 'pooling:' - -# TODO(kgriffs): If a queue is migrated, everyone's -# caches need to have the relevant entry invalidated -# before "unfreezing" the queue, rather than waiting -# on the TTL. -# -# TODO(kgriffs): Make configurable? -_POOL_CACHE_TTL = 10 - - -def _config_options(): - return [(_CATALOG_GROUP, _CATALOG_OPTIONS)] - - -def _pool_cache_key(queue, project=None): - # NOTE(kgriffs): Use string concatenation for performance, - # also put project first since it is guaranteed to be - # unique, which should reduce lookup time. - return _POOL_CACHE_PREFIX + str(project) + '/' + queue - - -class DataDriver(storage.DataDriverBase): - """Pooling meta-driver for routing requests to multiple backends. - - :param conf: Configuration from which to read pooling options - :param cache: Cache instance that will be passed to individual - storage driver instances that correspond to each pool. will - also be used by the pool controller to reduce latency for - some operations. - """ - - BASE_CAPABILITIES = tuple(storage.Capabilities) - - def __init__(self, conf, cache, control, control_driver=None): - super(DataDriver, self).__init__(conf, cache, control_driver) - catalog = Catalog(conf, cache, control) - if self.conf.profiler.enabled: - catalog = profiler.trace_cls("pooling_catalogue_" - "controller")(catalog) - self._pool_catalog = catalog - - @property - def capabilities(self): - # NOTE(flaper87): We can't know the capabilities - # of this driver because pools are loaded based on - # the queue and project of the request. Therefore, - # we will just assume all capabilities are supported. - # This shouldn't be an issue because the pooling driver - # is neither used for pools creation nor flavor creation. 
- return self.BASE_CAPABILITIES - - def close(self): - cursor = self._pool_catalog._pools_ctrl.list(limit=0) - # Messages of each pool - for pool in next(cursor): - driver = self._pool_catalog.get_driver(pool['name']) - driver.close() - - def is_alive(self): - cursor = self._pool_catalog._pools_ctrl.list(limit=0) - pools = next(cursor) - return all(self._pool_catalog.get_driver(pool['name']).is_alive() - for pool in pools) - - def _health(self): - KPI = {} - # Leverage the is_alive to indicate if the backend storage is - # reachable or not - KPI['catalog_reachable'] = self.is_alive() - - cursor = self._pool_catalog._pools_ctrl.list(limit=0) - # Messages of each pool - for pool in next(cursor): - driver = self._pool_catalog.get_driver(pool['name']) - KPI[pool['name']] = driver._health() - - return KPI - - def gc(self): - cursor = self._pool_catalog._pools_ctrl.list(limit=0) - for pool in next(cursor): - driver = self._pool_catalog.get_driver(pool['name']) - driver.gc() - - @decorators.lazy_property(write=False) - def queue_controller(self): - controller = QueueController(self._pool_catalog) - if self.conf.profiler.enabled: - return profiler.trace_cls("pooling_queue_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def message_controller(self): - controller = MessageController(self._pool_catalog) - if self.conf.profiler.enabled: - return profiler.trace_cls("pooling_message_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def claim_controller(self): - controller = ClaimController(self._pool_catalog) - if self.conf.profiler.enabled: - return profiler.trace_cls("pooling_claim_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def subscription_controller(self): - controller = SubscriptionController(self._pool_catalog) - if self.conf.profiler.enabled: - return (profiler.trace_cls("pooling_subscription_controller") - (controller)) - else: - return controller - - -class QueueController(storage.Queue): - """Routes operations to get the appropriate queue controller. 
- - :param pool_catalog: a catalog of available pools - :type pool_catalog: queues.pooling.base.Catalog - """ - - def __init__(self, pool_catalog): - super(QueueController, self).__init__(None) - self._pool_catalog = pool_catalog - self._mgt_queue_ctrl = self._pool_catalog.control.queue_controller - self._get_controller = self._pool_catalog.get_queue_controller - - def _list(self, project=None, marker=None, - limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False): - - def all_pages(): - yield next(self._mgt_queue_ctrl.list( - project=project, - marker=marker, - limit=limit, - detailed=detailed)) - - # make a heap compared with 'name' - ls = heapq.merge(*[ - utils.keyify('name', page) - for page in all_pages() - ]) - - marker_name = {} - - # limit the iterator and strip out the comparison wrapper - def it(): - for queue_cmp in itertools.islice(ls, limit): - marker_name['next'] = queue_cmp.obj['name'] - yield queue_cmp.obj - - yield it() - yield marker_name and marker_name['next'] - - def _get(self, name, project=None): - try: - return self.get_metadata(name, project) - except errors.QueueDoesNotExist: - return {} - - def _create(self, name, metadata=None, project=None): - flavor = None - if isinstance(metadata, dict): - flavor = metadata.get('_flavor') - - self._pool_catalog.register(name, project=project, flavor=flavor) - - # NOTE(cpp-cabrera): This should always succeed since we just - # registered the project/queue. There is a race condition, - # however. If between the time we register a queue and go to - # look it up, the queue is deleted, then this assertion will - # fail. - pool = self._pool_catalog.lookup(name, project) - if not pool: - raise RuntimeError('Failed to register queue') - return self._mgt_queue_ctrl.create(name, metadata=metadata, - project=project) - - def _delete(self, name, project=None): - mqHandler = self._get_controller(name, project) - if mqHandler: - # NOTE(cpp-cabrera): delete from the catalogue first. If - # zaqar crashes in the middle of these two operations, - # it is desirable that the entry be missing from the - # catalogue and present in storage, rather than the - # reverse. The former case leads to all operations - # behaving as expected: 404s across the board, and a - # functionally equivalent 204 on a create queue. The - # latter case is more difficult to reason about, and may - # yield 500s in some operations. - self._pool_catalog.deregister(name, project) - mqHandler.delete(name, project) - - return self._mgt_queue_ctrl.delete(name, project) - - def _exists(self, name, project=None): - return self._mgt_queue_ctrl.exists(name, project=project) - - def get_metadata(self, name, project=None): - return self._mgt_queue_ctrl.get_metadata(name, project=project) - - def set_metadata(self, name, metadata, project=None): - return self._mgt_queue_ctrl.set_metadata(name, metadata=metadata, - project=project) - - def _stats(self, name, project=None): - mqHandler = self._get_controller(name, project) - if mqHandler: - return mqHandler.stats(name, project=project) - raise errors.QueueDoesNotExist(name, project) - - -class MessageController(storage.Message): - """Routes operations to a message controller in the appropriate pool. 
- - :param pool_catalog: a catalog of available pools - :type pool_catalog: queues.pooling.base.Catalog - """ - - def __init__(self, pool_catalog): - super(MessageController, self).__init__(None) - self._pool_catalog = pool_catalog - self._get_controller = self._pool_catalog.get_message_controller - - def post(self, queue, messages, client_uuid, project=None): - control = self._get_controller(queue, project) - if control: - return control.post(queue, project=project, - messages=messages, - client_uuid=client_uuid) - raise errors.QueueDoesNotExist(queue, project) - - def delete(self, queue, message_id, project=None, claim=None): - control = self._get_controller(queue, project) - if control: - return control.delete(queue, project=project, - message_id=message_id, claim=claim) - return None - - def bulk_delete(self, queue, message_ids, project=None): - control = self._get_controller(queue, project) - if control: - return control.bulk_delete(queue, project=project, - message_ids=message_ids) - return None - - def pop(self, queue, limit, project=None): - control = self._get_controller(queue, project) - if control: - return control.pop(queue, project=project, limit=limit) - return None - - def bulk_get(self, queue, message_ids, project=None): - control = self._get_controller(queue, project) - if control: - return control.bulk_get(queue, project=project, - message_ids=message_ids) - return [] - - def list(self, queue, project=None, marker=None, - limit=storage.DEFAULT_MESSAGES_PER_PAGE, - echo=False, client_uuid=None, include_claimed=False): - control = self._get_controller(queue, project) - if control: - return control.list(queue, project=project, - marker=marker, limit=limit, - echo=echo, client_uuid=client_uuid, - include_claimed=include_claimed) - return iter([[]]) - - def get(self, queue, message_id, project=None): - control = self._get_controller(queue, project) - if control: - return control.get(queue, message_id=message_id, - project=project) - raise errors.QueueDoesNotExist(queue, project) - - def first(self, queue, project=None, sort=1): - control = self._get_controller(queue, project) - if control: - return control.first(queue, project=project, sort=sort) - raise errors.QueueDoesNotExist(queue, project) - - -class ClaimController(storage.Claim): - """Routes operations to a claim controller in the appropriate pool. 
- - :param pool_catalog: a catalog of available pools - :type pool_catalog: queues.pooling.base.Catalog - """ - - def __init__(self, pool_catalog): - super(ClaimController, self).__init__(None) - self._pool_catalog = pool_catalog - self._get_controller = self._pool_catalog.get_claim_controller - - def create(self, queue, metadata, project=None, - limit=storage.DEFAULT_MESSAGES_PER_CLAIM): - control = self._get_controller(queue, project) - if control: - return control.create(queue, metadata=metadata, - project=project, limit=limit) - return [None, []] - - def get(self, queue, claim_id, project=None): - control = self._get_controller(queue, project) - if control: - return control.get(queue, claim_id=claim_id, - project=project) - raise errors.ClaimDoesNotExist(claim_id, queue, project) - - def update(self, queue, claim_id, metadata, project=None): - control = self._get_controller(queue, project) - if control: - return control.update(queue, claim_id=claim_id, - project=project, metadata=metadata) - raise errors.ClaimDoesNotExist(claim_id, queue, project) - - def delete(self, queue, claim_id, project=None): - control = self._get_controller(queue, project) - if control: - return control.delete(queue, claim_id=claim_id, - project=project) - return None - - -class SubscriptionController(storage.Subscription): - """Controller to facilitate processing for subscription operations.""" - - _resource_name = 'subscription' - - def __init__(self, pool_catalog): - super(SubscriptionController, self).__init__(pool_catalog) - self._pool_catalog = pool_catalog - self._get_controller = self._pool_catalog.get_subscription_controller - - def list(self, queue, project=None, marker=None, - limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE): - control = self._get_controller(queue, project) - if control: - return control.list(queue, project=project, - marker=marker, limit=limit) - - def get(self, queue, subscription_id, project=None): - control = self._get_controller(queue, project) - if control: - return control.get(queue, subscription_id, project=project) - - def create(self, queue, subscriber, ttl, options, project=None): - control = self._get_controller(queue, project) - if control: - return control.create(queue, subscriber, - ttl, options, - project=project) - - def update(self, queue, subscription_id, project=None, **kwargs): - control = self._get_controller(queue, project) - if control: - return control.update(queue, subscription_id, - project=project, **kwargs) - - def delete(self, queue, subscription_id, project=None): - control = self._get_controller(queue, project) - if control: - return control.delete(queue, subscription_id, - project=project) - - def exists(self, queue, subscription_id, project=None): - control = self._get_controller(queue, project) - if control: - return control.exists(queue, subscription_id, - project=project) - - def confirm(self, queue, subscription_id, project=None, confirmed=None): - control = self._get_controller(queue, project) - if control: - return control.confirm(queue, subscription_id, - project=project, confirmed=confirmed) - - def get_with_subscriber(self, queue, subscriber, project=None): - control = self._get_controller(queue, project) - if control: - return control.get_with_subscriber(queue, subscriber, project) - - -class Catalog(object): - """Represents the mapping between queues and pool drivers.""" - - def __init__(self, conf, cache, control): - self._drivers = {} - self._conf = conf - self._cache = cache - self.control = control - - 
self._conf.register_opts(_CATALOG_OPTIONS, group=_CATALOG_GROUP) - self._catalog_conf = self._conf[_CATALOG_GROUP] - - self._pools_ctrl = control.pools_controller - self._flavor_ctrl = control.flavors_controller - self._catalogue_ctrl = control.catalogue_controller - - # FIXME(cpp-cabrera): https://bugs.launchpad.net/zaqar/+bug/1252791 - def _init_driver(self, pool_id, pool_conf=None): - """Given a pool name, returns a storage driver. - - :param pool_id: The name of a pool. - :type pool_id: six.text_type - :returns: a storage driver - :rtype: zaqar.storage.base.DataDriverBase - """ - if pool_id is not None: - pool = self._pools_ctrl.get(pool_id, detailed=True) - else: - pool = pool_conf - conf = utils.dynamic_conf(pool['uri'], pool['options'], - conf=self._conf) - storage = utils.load_storage_driver(conf, - self._cache, - control_driver=self.control) - return pipeline.DataDriver(conf, storage, self.control) - - @decorators.caches(_pool_cache_key, _POOL_CACHE_TTL) - def _pool_id(self, queue, project=None): - """Get the ID for the pool assigned to the given queue. - - :param queue: name of the queue - :param project: project to which the queue belongs - - :returns: pool id - - :raises QueueNotMapped: if queue is not mapped - """ - return self._catalogue_ctrl.get(project, queue)['pool'] - - def register(self, queue, project=None, flavor=None): - """Register a new queue in the pool catalog. - - This method should be called whenever a new queue is being - created, and will create an entry in the pool catalog for - the given queue. - - After using this method to register the queue in the - catalog, the caller should call `lookup()` to get a reference - to a storage driver which will allow interacting with the - queue's assigned backend pool. - - :param queue: Name of the new queue to assign to a pool - :type queue: six.text_type - :param project: Project to which the queue belongs, or - None for the "global" or "generic" project. - :type project: six.text_type - :param flavor: Flavor for the queue (OPTIONAL) - :type flavor: six.text_type - - :raises NoPoolFound: if not found - - """ - - # NOTE(cpp-cabrera): only register a queue if the entry - # doesn't exist - if not self._catalogue_ctrl.exists(project, queue): - - if flavor is not None: - flavor = self._flavor_ctrl.get(flavor, project=project) - pools = self._pools_ctrl.get_pools_by_group( - group=flavor['pool_group'], - detailed=True) - pool = select.weighted(pools) - pool = pool and pool['name'] or None - else: - # NOTE(flaper87): Get pools assigned to the default - # group `None`. We should consider adding a `default_group` - # option in the future. - pools = self._pools_ctrl.get_pools_by_group(detailed=True) - pool = select.weighted(pools) - pool = pool and pool['name'] or None - - if not pool: - # NOTE(flaper87): We used to raise NoPoolFound in this - # case but we've decided to support automatic pool - # creation. Note that we're now returning and the queue - # is not being registered in the catalogue. This is done - # on purpose since no pool exists and the "dummy" pool - # doesn't exist in the storage - if self.lookup(queue, project) is not None: - return - raise errors.NoPoolFound() - - self._catalogue_ctrl.insert(project, queue, pool) - - @_pool_id.purges - def deregister(self, queue, project=None): - """Removes a queue from the pool catalog. - - Call this method after successfully deleting it from a - backend pool. 
- - :param queue: Name of the new queue to assign to a pool - :type queue: six.text_type - :param project: Project to which the queue belongs, or - None for the "global" or "generic" project. - :type project: six.text_type - """ - self._catalogue_ctrl.delete(project, queue) - - def get_queue_controller(self, queue, project=None): - """Lookup the queue controller for the given queue and project. - - :param queue: Name of the queue for which to find a pool - :param project: Project to which the queue belongs, or - None to specify the "global" or "generic" project. - - :returns: The queue controller associated with the data driver for - the pool containing (queue, project) or None if this doesn't exist. - :rtype: Maybe QueueController - """ - target = self.lookup(queue, project) - return target and target.queue_controller - - def get_message_controller(self, queue, project=None): - """Lookup the message controller for the given queue and project. - - :param queue: Name of the queue for which to find a pool - :param project: Project to which the queue belongs, or - None to specify the "global" or "generic" project. - - :returns: The message controller associated with the data driver for - the pool containing (queue, project) or None if this doesn't exist. - :rtype: Maybe MessageController - """ - target = self.lookup(queue, project) - return target and target.message_controller - - def get_claim_controller(self, queue, project=None): - """Lookup the claim controller for the given queue and project. - - :param queue: Name of the queue for which to find a pool - :param project: Project to which the queue belongs, or - None to specify the "global" or "generic" project. - - :returns: The claim controller associated with the data driver for - the pool containing (queue, project) or None if this doesn't exist. - :rtype: Maybe ClaimController - """ - target = self.lookup(queue, project) - return target and target.claim_controller - - def get_subscription_controller(self, queue, project=None): - """Lookup the subscription controller for the given queue and project. - - :param queue: Name of the queue for which to find a pool - :param project: Project to which the queue belongs, or - None to specify the "global" or "generic" project. - - :returns: The subscription controller associated with the data driver - for the pool containing (queue, project) or None if this doesn't - exist. - :rtype: Maybe SubscriptionController - """ - target = self.lookup(queue, project) - return target and target.subscription_controller - - def get_default_pool(self, use_listing=True): - if use_listing: - cursor = self._pools_ctrl.list(limit=0) - pools_list = list(next(cursor)) - if pools_list: - return self.get_driver(pools_list[0]['name']) - - if self._catalog_conf.enable_virtual_pool: - conf_section = ('drivers:message_store:%s' % - self._conf.drivers.message_store) - - try: - # NOTE(flaper87): Try to load the driver to check - # whether it can be used as the default store for - # the default pool. - utils.load_storage_driver(self._conf, self._cache, - control_driver=self.control) - except cerrors.InvalidDriver: - # NOTE(kgriffs): Return `None`, rather than letting the - # exception bubble up, so that the higher layer doesn't - # have to duplicate the try..except..log code all over - # the place. - return None - - if conf_section not in self._conf: - # NOTE(flaper87): If there's no config section for this storage - # skip the pool registration entirely since we won't know how - # to connect to it. 
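# --- Illustrative sketch (editor's example; not part of the original
# pooling.py) --- register() above delegates pool selection to
# select.weighted(). A minimal weight-proportional choice over the pool
# documents used in this module might look like the following; this is
# an assumed equivalent, not the actual zaqar.common.storage.select
# code.

import random as _random

def _weighted_choice(pools):
    eligible = [p for p in pools if p['weight'] > 0]
    if not eligible:
        return None
    point = _random.uniform(0, sum(p['weight'] for p in eligible))
    for pool in eligible:
        point -= pool['weight']
        if point <= 0:
            return pool
    return eligible[-1]  # guard against float rounding

pool = _weighted_choice([{'name': 'a', 'weight': 100},
                         {'name': 'b', 'weight': 20}])
# --- end sketch ---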
- return None - - # NOTE(flaper87): This assumes the storage driver type is the - # same as the management. - pool_conf = {'uri': self._conf[conf_section].uri, - 'options': {}} - - # NOTE(flaper87): This will be using the config - # storage configuration as the default one if no - # default storage has been registered in the pool - # store. - return self.get_driver(None, pool_conf) - - def lookup(self, queue, project=None): - """Lookup a pool driver for the given queue and project. - - :param queue: Name of the queue for which to find a pool - :param project: Project to which the queue belongs, or - None to specify the "global" or "generic" project. - - :returns: A storage driver instance for the appropriate pool. If - the driver does not exist yet, it is created and cached. If the - queue is not mapped, returns None. - :rtype: Maybe DataDriver - """ - - try: - pool_id = self._pool_id(queue, project) - except errors.QueueNotMapped as ex: - LOG.debug(ex) - - return self.get_default_pool(use_listing=False) - - return self.get_driver(pool_id) - - def get_driver(self, pool_id, pool_conf=None): - """Get storage driver, preferably cached, from a pool name. - - :param pool_id: The name of a pool. - :type pool_id: six.text_type - :returns: a storage driver - :rtype: zaqar.storage.base.DataDriver - """ - - try: - return self._drivers[pool_id] - except KeyError: - # NOTE(cpp-cabrera): cache storage driver connection - self._drivers[pool_id] = self._init_driver(pool_id, pool_conf) - - return self._drivers[pool_id] diff --git a/zaqar/storage/redis/__init__.py b/zaqar/storage/redis/__init__.py deleted file mode 100644 index 268456f4..00000000 --- a/zaqar/storage/redis/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r""" -Zaqar backed by Redis. - -Redis? ------- - -Redis is sometimes called a "data structure store" because it makes common -data structures like hashes, lists, and sets available in shared, in-memory -storage. Zaqar chose redis because it has strong consistency and its Lua -scripting allows for semi-complex transactions to be built atop the primitives -it provides. - -Supported Features ------------------- - -- FIFO -- Claims -- High Throughput[1]_ -- At-least-once Delivery - -.. [1] This depends on the backing Redis store performance. For more -information, see `Redis' benchmarks `_. - -Redis is only a storage driver, and can't be used as the sole backend for a -Zaqar deployment. - -Unsupported Features --------------------- - -- Durability[2]_ - -.. [2] As an in-memory store, Redis doesn't support the durability guarantees - the MongoDB or SQLAlchemy backends do. - -Redis is not supported as the backend for the Management Store, which means -either MongoDB or SQLAlchemy are required in addition to Redis for a working -deployment. 
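For a sense of what such a split deployment looks like, here is a
minimal illustrative configuration sketch (editor's example; the group
and option names follow the ``drivers`` conventions referenced
elsewhere in this tree, but treat the exact values as assumptions
rather than a verified sample)::

    [drivers]
    management_store = mongodb
    message_store = redis

    [drivers:management_store:mongodb]
    uri = mongodb://localhost:27017

    [drivers:message_store:redis]
    uri = redis://localhost:6379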
- - -""" diff --git a/zaqar/storage/redis/claims.py b/zaqar/storage/redis/claims.py deleted file mode 100644 index 54b98887..00000000 --- a/zaqar/storage/redis/claims.py +++ /dev/null @@ -1,411 +0,0 @@ -# Copyright (c) 2014 Prashanth Raghu. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools - -import msgpack -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from zaqar.common import decorators -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage.redis import messages -from zaqar.storage.redis import scripting -from zaqar.storage.redis import utils - -QUEUE_CLAIMS_SUFFIX = 'claims' -CLAIM_MESSAGES_SUFFIX = 'messages' - -RETRY_CLAIM_TIMEOUT = 10 - -# NOTE(kgriffs): Number of claims to read at a time when counting -# the total number of claimed messages for a queue. -# -# TODO(kgriffs): Tune this parameter and/or make it configurable. It -# takes ~0.8 ms to retrieve 100 items from a sorted set on a 2.7 GHz -# Intel Core i7 (not including network latency). -COUNTING_BATCH_SIZE = 100 - - -class ClaimController(storage.Claim, scripting.Mixin): - """Implements claim resource operations using Redis. - - Redis Data Structures: - - 1. Claims list (Redis set) contains claim IDs - - Key: ..claims - - +-------------+---------+ - | Name | Field | - +=============+=========+ - | claim_ids | m | - +-------------+---------+ - - 2. Claimed Messages (Redis set) contains the list - of message ids stored per claim - - Key: .messages - - 3. Claim info (Redis hash): - - Key: - - +----------------+---------+ - | Name | Field | - +================+=========+ - | ttl | t | - +----------------+---------+ - | id | id | - +----------------+---------+ - | expires | e | - +----------------+---------+ - | num_messages | n | - +----------------+---------+ - """ - - script_names = ['claim_messages'] - - def __init__(self, *args, **kwargs): - super(ClaimController, self).__init__(*args, **kwargs) - self._client = self.driver.connection - - self._packer = msgpack.Packer(encoding='utf-8', - use_bin_type=True).pack - self._unpacker = functools.partial(msgpack.unpackb, encoding='utf-8') - - @decorators.lazy_property(write=False) - def _queue_ctrl(self): - return self.driver.queue_controller - - def _get_claim_info(self, claim_id, fields, transform=int): - """Get one or more fields from the claim Info.""" - - values = self._client.hmget(claim_id, fields) - if values == [None]: - return values - else: - return [transform(v) for v in values] if transform else values - - def _claim_messages(self, msgset_key, now, limit, - claim_id, claim_expires, msg_ttl, msg_expires): - - # NOTE(kgriffs): A watch on a pipe could also be used, but that - # is less efficient and predictable, based on our experience in - # having to do something similar in the MongoDB driver. 
-        func = self._scripts['claim_messages']
-
-        args = [now, limit, claim_id, claim_expires, msg_ttl, msg_expires]
-        return func(keys=[msgset_key], args=args)
-
-    def _exists(self, queue, claim_id, project):
-        client = self._client
-        claims_set_key = utils.scope_claims_set(queue, project,
-                                                QUEUE_CLAIMS_SUFFIX)
-        # The queue may not exist at all, so check for it first and
-        # return False if it is missing.
-
-        # TODO(flwang): We should delete all related data after the queue is
-        # deleted. See the blueprint for more detail:
-        # https://blueprints.launchpad.net/zaqar/+spec/clear-resources-after-delete-queue
-        if not self._queue_ctrl._exists(queue, project):
-            return False
-
-        # Return False if no such claim exists
-        # TODO(prashanthr_): Discuss the feasibility of a bloom filter.
-        if client.zscore(claims_set_key, claim_id) is None:
-            return False
-
-        expires = self._get_claim_info(claim_id, b'e')[0]
-        now = timeutils.utcnow_ts()
-
-        if expires <= now:
-            # NOTE(kgriffs): Redis should automatically remove the
-            # other records in the very near future. This one
-            # has to be manually deleted, however.
-            client.zrem(claims_set_key, claim_id)
-            return False
-
-        return True
-
-    def _get_claimed_message_keys(self, claim_msgs_key):
-        return self._client.lrange(claim_msgs_key, 0, -1)
-
-    def _count_messages(self, queue, project):
-        """Count and return the total number of claimed messages."""
-
-        # NOTE(kgriffs): Iterate through all claims, adding up the
-        # number of messages per claim. This is obviously slower
-        # than keeping a side counter, but is also less error-prone.
-        # Plus, it avoids having to do a lot of extra work during
-        # garbage collection passes. Also, considering that most
-        # workloads won't require a large number of claims, most of
-        # the time we can do this in a single pass, so it is still
-        # pretty fast.
-
-        claims_set_key = utils.scope_claims_set(queue, project,
-                                                QUEUE_CLAIMS_SUFFIX)
-        num_claimed = 0
-        offset = 0
-
-        while True:
-            claim_ids = self._client.zrange(claims_set_key, offset,
-                                            offset + COUNTING_BATCH_SIZE - 1)
-            if not claim_ids:
-                break
-
-            offset += len(claim_ids)
-
-            with self._client.pipeline() as pipe:
-                for cid in claim_ids:
-                    pipe.hmget(cid, 'n')
-
-                claim_infos = pipe.execute()
-
-            for info in claim_infos:
-                # NOTE(kgriffs): In case the claim was deleted out
-                # from under us, sanity-check that we got a non-None
-                # info list.
-                if info:
-                    num_claimed += int(info[0])
-
-        return num_claimed
-
-    def _del_message(self, queue, project, claim_id, message_id, pipe):
-        """Called by MessageController when messages are being deleted.
-
-        This method removes the message from claim data structures.
-        """
-
-        claim_msgs_key = utils.scope_claim_messages(claim_id,
-                                                    CLAIM_MESSAGES_SUFFIX)
-
-        # NOTE(kgriffs): In practice, scanning will be quite fast,
-        # since the usual pattern is to delete messages from oldest
-        # to newest, and the list is sorted in that order. Also,
-        # the length of the list will usually be ~10 messages.
-        pipe.lrem(claim_msgs_key, 1, message_id)
-
-        # NOTE(kgriffs): Decrement the message counter used for stats
-        pipe.hincrby(claim_id, 'n', -1)
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def _gc(self, queue, project):
-        """Garbage-collect expired claim data.
-
-        Not all claim data can be automatically expired. This method
-        cleans up the remainder.
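
        Because claim IDs in the per-queue claims set are scored by their
        expiration timestamp, the cleanup below reduces to a single range
        removal; conceptually::

            ZREMRANGEBYSCORE <claims_set_key> 0 <now>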
-
-        :returns: Number of claims removed
-        """
-
-        claims_set_key = utils.scope_claims_set(queue, project,
-                                                QUEUE_CLAIMS_SUFFIX)
-        now = timeutils.utcnow_ts()
-        num_removed = self._client.zremrangebyscore(claims_set_key, 0, now)
-        return num_removed
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def get(self, queue, claim_id, project=None):
-        if not self._exists(queue, claim_id, project):
-            raise errors.ClaimDoesNotExist(claim_id, queue, project)
-
-        claim_msgs_key = utils.scope_claim_messages(claim_id,
-                                                    CLAIM_MESSAGES_SUFFIX)
-
-        # basic_messages
-        msg_keys = self._get_claimed_message_keys(claim_msgs_key)
-        claimed_msgs = messages.Message.from_redis_bulk(msg_keys,
-                                                        self._client)
-        now = timeutils.utcnow_ts()
-        basic_messages = [msg.to_basic(now)
-                          for msg in claimed_msgs if msg]
-
-        # claim_meta
-        now = timeutils.utcnow_ts()
-        expires, ttl = self._get_claim_info(claim_id, [b'e', b't'])
-        update_time = expires - ttl
-        age = now - update_time
-
-        claim_meta = {
-            'age': age,
-            'ttl': ttl,
-            'id': claim_id,
-        }
-
-        return claim_meta, basic_messages
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def create(self, queue, metadata, project=None,
-               limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
-
-        claim_ttl = metadata['ttl']
-        grace = metadata['grace']
-
-        now = timeutils.utcnow_ts()
-        msg_ttl = claim_ttl + grace
-        claim_expires = now + claim_ttl
-        msg_expires = claim_expires + grace
-
-        claim_id = uuidutils.generate_uuid()
-        claimed_msgs = []
-
-        # NOTE(kgriffs): Claim some messages
-        msgset_key = utils.msgset_key(queue, project)
-        claimed_ids = self._claim_messages(msgset_key, now, limit,
-                                           claim_id, claim_expires,
-                                           msg_ttl, msg_expires)
-
-        if claimed_ids:
-            claimed_msgs = messages.Message.from_redis_bulk(claimed_ids,
                                                            self._client)
-            claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs]
-
-            # NOTE(kgriffs): Persist claim records
-            with self._client.pipeline() as pipe:
-                claim_msgs_key = utils.scope_claim_messages(
-                    claim_id, CLAIM_MESSAGES_SUFFIX)
-
-                for mid in claimed_ids:
-                    pipe.rpush(claim_msgs_key, mid)
-
-                pipe.expire(claim_msgs_key, claim_ttl)
-
-                claim_info = {
-                    'id': claim_id,
-                    't': claim_ttl,
-                    'e': claim_expires,
-                    'n': len(claimed_ids),
-                }
-
-                pipe.hmset(claim_id, claim_info)
-                pipe.expire(claim_id, claim_ttl)
-
-                # NOTE(kgriffs): Add the claim ID to a set so that
-                # existence checks can be performed quickly. This
-                # is also used as a watch key in order to guard
-                # against race conditions.
-                #
-                # A sorted set is used to facilitate cleaning
-                # up the IDs of expired claims.
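                # Illustrative end state (hypothetical IDs): after this
                # pipeline executes, Redis holds roughly:
                #
                #   <claim_id>                 hash  {id, t=60, e=1060, n=2}
                #   <claim_id>.messages        list  [mid1, mid2]
                #   <project>.<queue>.claims   zset  {<claim_id>: 1060}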
- claims_set_key = utils.scope_claims_set(queue, project, - QUEUE_CLAIMS_SUFFIX) - - pipe.zadd(claims_set_key, claim_expires, claim_id) - pipe.execute() - - return claim_id, claimed_msgs - - @utils.raises_conn_error - @utils.retries_on_connection_error - def update(self, queue, claim_id, metadata, project=None): - if not self._exists(queue, claim_id, project): - raise errors.ClaimDoesNotExist(claim_id, queue, project) - - now = timeutils.utcnow_ts() - - claim_ttl = metadata['ttl'] - claim_expires = now + claim_ttl - - grace = metadata['grace'] - msg_ttl = claim_ttl + grace - msg_expires = claim_expires + grace - - claim_msgs_key = utils.scope_claim_messages(claim_id, - CLAIM_MESSAGES_SUFFIX) - - msg_keys = self._get_claimed_message_keys(claim_msgs_key) - claimed_msgs = messages.MessageEnvelope.from_redis_bulk(msg_keys, - self._client) - claim_info = { - 't': claim_ttl, - 'e': claim_expires, - } - - with self._client.pipeline() as pipe: - for msg in claimed_msgs: - if msg: - msg.claim_id = claim_id - msg.claim_expires = claim_expires - - if _msg_would_expire(msg, claim_expires): - msg.ttl = msg_ttl - msg.expires = msg_expires - - # TODO(kgriffs): Rather than writing back the - # entire message, only set the fields that - # have changed. - # - # When this change is made, don't forget to - # also call pipe.expire with the new TTL value. - msg.to_redis(pipe) - - # Update the claim id and claim expiration info - # for all the messages. - pipe.hmset(claim_id, claim_info) - pipe.expire(claim_id, claim_ttl) - - pipe.expire(claim_msgs_key, claim_ttl) - - claims_set_key = utils.scope_claims_set(queue, project, - QUEUE_CLAIMS_SUFFIX) - - pipe.zadd(claims_set_key, claim_expires, claim_id) - - pipe.execute() - - @utils.raises_conn_error - @utils.retries_on_connection_error - def delete(self, queue, claim_id, project=None): - # NOTE(prashanthr_): Return silently when the claim - # does not exist - if not self._exists(queue, claim_id, project): - return - - now = timeutils.utcnow_ts() - claim_msgs_key = utils.scope_claim_messages(claim_id, - CLAIM_MESSAGES_SUFFIX) - - msg_keys = self._get_claimed_message_keys(claim_msgs_key) - claimed_msgs = messages.MessageEnvelope.from_redis_bulk(msg_keys, - self._client) - # Update the claim id and claim expiration info - # for all the messages. - claims_set_key = utils.scope_claims_set(queue, project, - QUEUE_CLAIMS_SUFFIX) - - with self._client.pipeline() as pipe: - pipe.zrem(claims_set_key, claim_id) - pipe.delete(claim_id) - pipe.delete(claim_msgs_key) - - for msg in claimed_msgs: - if msg: - msg.claim_id = None - msg.claim_expires = now - - # TODO(kgriffs): Rather than writing back the - # entire message, only set the fields that - # have changed. - msg.to_redis(pipe) - - pipe.execute() - - -def _msg_would_expire(message, now): - return message.expires <= now diff --git a/zaqar/storage/redis/controllers.py b/zaqar/storage/redis/controllers.py deleted file mode 100644 index 04dada1b..00000000 --- a/zaqar/storage/redis/controllers.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2014 Prashanth Raghu -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from zaqar.storage.redis import claims -from zaqar.storage.redis import messages -from zaqar.storage.redis import queues -from zaqar.storage.redis import subscriptions - - -QueueController = queues.QueueController -MessageController = messages.MessageController -ClaimController = claims.ClaimController -SubscriptionController = subscriptions.SubscriptionController diff --git a/zaqar/storage/redis/driver.py b/zaqar/storage/redis/driver.py deleted file mode 100644 index a9423119..00000000 --- a/zaqar/storage/redis/driver.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright (c) 2014 Prashanth Raghu. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from osprofiler import profiler -import redis -import redis.sentinel -from six.moves import urllib - -from zaqar.common import decorators -from zaqar.common import errors -from zaqar.i18n import _ -from zaqar import storage -from zaqar.storage.redis import controllers -from zaqar.storage.redis import options - -REDIS_DEFAULT_PORT = 6379 -SENTINEL_DEFAULT_PORT = 26379 -DEFAULT_SOCKET_TIMEOUT = 0.1 - -STRATEGY_TCP = 1 -STRATEGY_UNIX = 2 -STRATEGY_SENTINEL = 3 - - -class ConnectionURI(object): - def __init__(self, uri): - # TODO(prashanthr_): Add SSL support - try: - parsed_url = urllib.parse.urlparse(uri) - except SyntaxError: - raise errors.ConfigurationError(_('Malformed Redis URI')) - - if parsed_url.scheme != 'redis': - raise errors.ConfigurationError(_('Invalid scheme in Redis URI')) - - # NOTE(kgriffs): Python 2.6 has a bug that causes the - # query string to be appended to the path when given a - # hostless URL. - path = parsed_url.path - if '?' in path: - path, sep, query = path.partition('?') - else: - query = parsed_url.query - - query_params = dict(urllib.parse.parse_qsl(query)) - - # Generic - self.strategy = None - self.socket_timeout = float(query_params.get('socket_timeout', - DEFAULT_SOCKET_TIMEOUT)) - - # TCP - self.port = None - self.hostname = None - - # UNIX socket - self.unix_socket_path = None - - # Sentinel - self.master = None - self.sentinels = [] - - if 'master' in query_params: - # NOTE(prashanthr_): Configure redis driver in sentinel mode - self.strategy = STRATEGY_SENTINEL - self.master = query_params['master'] - - # NOTE(kgriffs): Have to parse list of sentinel hosts ourselves - # since urllib doesn't support it. 
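            # Illustrative parse (hypothetical hosts): the URI
            #
            #     redis://s1:26379,s2:26379?master=mymaster
            #
            # yields strategy=STRATEGY_SENTINEL, master='mymaster', and,
            # after the loop below, sentinels=[('s1', 26379), ('s2', 26379)].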
- for each_host in parsed_url.netloc.split(','): - name, sep, port = each_host.partition(':') - - if port: - try: - port = int(port) - except ValueError: - msg = _('The Redis configuration URI contains an ' - 'invalid port') - raise errors.ConfigurationError(msg) - - else: - port = SENTINEL_DEFAULT_PORT - - self.sentinels.append((name, port)) - - if not self.sentinels: - msg = _('The Redis configuration URI does not define any ' - 'sentinel hosts') - raise errors.ConfigurationError(msg) - - elif parsed_url.netloc: - if ',' in parsed_url.netloc: - # NOTE(kgriffs): They probably were specifying - # a list of sentinel hostnames, but forgot to - # add 'master' to the query string. - msg = _('The Redis URI specifies multiple sentinel hosts, ' - 'but is missing the "master" query string ' - 'parameter. Please set "master" to the name of ' - 'the Redis master server as specified in the ' - 'sentinel configuration file.') - raise errors.ConfigurationError(msg) - - self.strategy = STRATEGY_TCP - try: - self.port = parsed_url.port or REDIS_DEFAULT_PORT - except ValueError: - msg = _('The Redis configuration URI contains an ' - 'invalid port') - raise errors.ConfigurationError(msg) - - if not parsed_url.hostname: - msg = _('Missing host name in Redis URI') - raise errors.ConfigurationError(msg) - - self.hostname = parsed_url.hostname - - else: - self.strategy = STRATEGY_UNIX - - if not path: - msg = _('Missing path in Redis URI') - raise errors.ConfigurationError(msg) - - self.unix_socket_path = path - - assert self.strategy in (STRATEGY_TCP, STRATEGY_UNIX, - STRATEGY_SENTINEL) - - -class DataDriver(storage.DataDriverBase): - - # NOTE(flaper87): The driver doesn't guarantee - # durability for Redis. - BASE_CAPABILITIES = (storage.Capabilities.FIFO, - storage.Capabilities.CLAIMS, - storage.Capabilities.AOD, - storage.Capabilities.HIGH_THROUGHPUT) - - _DRIVER_OPTIONS = options._config_options() - - def __init__(self, conf, cache, control_driver): - super(DataDriver, self).__init__(conf, cache, control_driver) - self.redis_conf = self.conf[options.MESSAGE_REDIS_GROUP] - - server_version = self.connection.info()['redis_version'] - if tuple(map(int, server_version.split('.'))) < (2, 6): - msg = _('The Redis driver requires redis-server>=2.6, ' - '%s found') % server_version - - raise RuntimeError(msg) - - # FIXME(flaper87): Make this dynamic - self._capabilities = self.BASE_CAPABILITIES - - @property - def capabilities(self): - return self._capabilities - - def is_alive(self): - try: - return self.connection.ping() - except redis.exceptions.ConnectionError: - return False - - def close(self): - self.connection.close() - - def _health(self): - KPI = {} - KPI['storage_reachable'] = self.is_alive() - KPI['operation_status'] = self._get_operation_status() - - # TODO(kgriffs): Add metrics re message volume - return KPI - - def gc(self): - # TODO(kgriffs): Check time since last run, and if - # it hasn't been very long, skip. This allows for - # running the GC script on multiple boxes for HA, - # without having them all attempting to GC at the - # same moment. 
- self.message_controller.gc() - - @decorators.lazy_property(write=False) - def connection(self): - """Redis client connection instance.""" - return _get_redis_client(self) - - @decorators.lazy_property(write=False) - def message_controller(self): - controller = controllers.MessageController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("redis_message_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def claim_controller(self): - controller = controllers.ClaimController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("redis_claim_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def subscription_controller(self): - controller = controllers.SubscriptionController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("redis_subscription_" - "controller")(controller) - else: - return controller - - -class ControlDriver(storage.ControlDriverBase): - - def __init__(self, conf, cache): - super(ControlDriver, self).__init__(conf, cache) - - self.conf.register_opts(options.MANAGEMENT_REDIS_OPTIONS, - group=options.MANAGEMENT_REDIS_GROUP) - - self.redis_conf = self.conf[options.MANAGEMENT_REDIS_GROUP] - - def close(self): - self.connection.close() - - @decorators.lazy_property(write=False) - def connection(self): - """Redis client connection instance.""" - return _get_redis_client(self) - - @decorators.lazy_property(write=False) - def queue_controller(self): - controller = controllers.QueueController(self) - if (self.conf.profiler.enabled and - (self.conf.profiler.trace_message_store or - self.conf.profiler.trace_management_store)): - return profiler.trace_cls("redis_queue_controller")(controller) - else: - return controller - - @property - def pools_controller(self): - raise NotImplementedError() - - @property - def catalogue_controller(self): - raise NotImplementedError() - - @property - def flavors_controller(self): - raise NotImplementedError() - - -def _get_redis_client(driver): - conf = driver.redis_conf - connection_uri = ConnectionURI(conf.uri) - - if connection_uri.strategy == STRATEGY_SENTINEL: - sentinel = redis.sentinel.Sentinel( - connection_uri.sentinels, - socket_timeout=connection_uri.socket_timeout) - - # NOTE(prashanthr_): The socket_timeout parameter being generic - # to all redis connections is inherited from the parameters for - # sentinel. - return sentinel.master_for(connection_uri.master) - - elif connection_uri.strategy == STRATEGY_TCP: - return redis.StrictRedis( - host=connection_uri.hostname, - port=connection_uri.port, - socket_timeout=connection_uri.socket_timeout) - else: - return redis.StrictRedis( - unix_socket_path=connection_uri.unix_socket_path, - socket_timeout=connection_uri.socket_timeout) diff --git a/zaqar/storage/redis/messages.py b/zaqar/storage/redis/messages.py deleted file mode 100644 index 13a670a3..00000000 --- a/zaqar/storage/redis/messages.py +++ /dev/null @@ -1,614 +0,0 @@ -# Copyright (c) 2014 Prashanth Raghu. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-import uuid
-
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-import redis
-
-from zaqar.common import decorators
-from zaqar import storage
-from zaqar.storage import errors
-from zaqar.storage.redis import models
-from zaqar.storage.redis import scripting
-from zaqar.storage.redis import utils
-
-Message = models.Message
-MessageEnvelope = models.MessageEnvelope
-
-
-MSGSET_INDEX_KEY = 'msgset_index'
-
-# The rank counter is an atomic index to rank messages
-# in a FIFO manner.
-MESSAGE_RANK_COUNTER_SUFFIX = 'rank_counter'
-
-# NOTE(kgriffs): This value, in seconds, should be less than the
-# minimum allowed TTL for messages (60 seconds).
-RETRY_POST_TIMEOUT = 10
-
-# TODO(kgriffs): Tune this and/or make it configurable. Don't want
-# it to be so large that it blocks other operations for more than
-# 1-2 milliseconds.
-GC_BATCH_SIZE = 100
-
-
-class MessageController(storage.Message, scripting.Mixin):
-    """Implements message resource operations using Redis.
-
-    Messages are scoped by project + queue.
-
-    Redis Data Structures:
-
-    1. Message IDs list (Redis sorted set)
-
-        Each queue in the system has a set of message ids currently
-        in the queue. The list is sorted based on a ranking which is
-        incremented atomically using the counter
-        (MESSAGE_RANK_COUNTER_SUFFIX) also stored in the database for
-        every queue.
-
-        Key: <project_id>.<queue_name>.messages
-
-    2. Index of message ID lists (Redis sorted set)
-
-        This is a sorted set that facilitates discovery of all the
-        message ID lists. This is necessary when performing
-        garbage collection on the IDs contained within these lists.
-
-        Key: msgset_index
-
-    3. Messages (Redis hash):
-
-        Scoped by the UUID of the message, the Redis data structure
-        has the following information.
-
-        +---------------------+---------+
-        |  Name               |  Field  |
-        +=====================+=========+
-        |  id                 |  id     |
-        +---------------------+---------+
-        |  ttl                |  t      |
-        +---------------------+---------+
-        |  expires            |  e      |
-        +---------------------+---------+
-        |  body               |  b      |
-        +---------------------+---------+
-        |  claim              |  c      |
-        +---------------------+---------+
-        |  claim expiry time  |  c.e    |
-        +---------------------+---------+
-        |  client uuid        |  u      |
-        +---------------------+---------+
-        |  created time       |  cr     |
-        +---------------------+---------+
-
-    4. Messages rank counter (Redis hash):
-
-        Key: <project_id>.<queue_name>.rank_counter
-    """
-
-    script_names = ['index_messages']
-
-    def __init__(self, *args, **kwargs):
-        super(MessageController, self).__init__(*args, **kwargs)
-        self._client = self.driver.connection
-
-    @decorators.lazy_property(write=False)
-    def _queue_ctrl(self):
-        return self.driver.queue_controller
-
-    def _index_messages(self, msgset_key, counter_key, message_ids):
-        # NOTE(kgriffs): A watch on a pipe could also be used to ensure
-        # messages are inserted in order, but that would be less efficient.
-        func = self._scripts['index_messages']
-
-        arguments = [len(message_ids)] + message_ids
-        func(keys=[msgset_key, counter_key], args=arguments)
-
-    def _count(self, queue, project):
-        """Return total number of messages in a queue.
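
        Conceptually this is a single O(1) call against the per-queue
        sorted set of message IDs::

            ZCARD <project_id>.<queue_name>.messages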
- - Note: Some expired messages may be included in the count if - they haven't been GC'd yet. This is done for performance. - """ - - return self._client.zcard(utils.msgset_key(queue, project)) - - def _create_msgset(self, queue, project, pipe): - pipe.zadd(MSGSET_INDEX_KEY, 1, utils.msgset_key(queue, project)) - - def _delete_msgset(self, queue, project, pipe): - pipe.zrem(MSGSET_INDEX_KEY, utils.msgset_key(queue, project)) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def _delete_queue_messages(self, queue, project, pipe): - """Method to remove all the messages belonging to a queue. - - Will be referenced from the QueueController. - The pipe to execute deletion will be passed from the QueueController - executing the operation. - """ - client = self._client - msgset_key = utils.msgset_key(queue, project) - message_ids = client.zrange(msgset_key, 0, -1) - - pipe.delete(msgset_key) - for msg_id in message_ids: - pipe.delete(msg_id) - - # TODO(prashanthr_): Look for better ways to solve the issue. - def _find_first_unclaimed(self, queue, project, limit): - """Find the first unclaimed message in the queue.""" - - msgset_key = utils.msgset_key(queue, project) - now = timeutils.utcnow_ts() - - # TODO(kgriffs): Generalize this paging pattern (DRY) - offset = 0 - - while True: - msg_keys = self._client.zrange(msgset_key, offset, - offset + limit - 1) - if not msg_keys: - return None - - offset += len(msg_keys) - - messages = [MessageEnvelope.from_redis(msg_key, self._client) - for msg_key in msg_keys] - - for msg in messages: - if msg and not utils.msg_claimed_filter(msg, now): - return msg.id - - def _exists(self, message_id): - """Check if message exists in the Queue.""" - return self._client.exists(message_id) - - def _get_first_message_id(self, queue, project, sort): - """Fetch head/tail of the Queue. - - Helper function to get the first message in the queue - sort > 0 get from the left else from the right. - """ - msgset_key = utils.msgset_key(queue, project) - - zrange = self._client.zrange if sort == 1 else self._client.zrevrange - message_ids = zrange(msgset_key, 0, 0) - return message_ids[0] if message_ids else None - - def _get_claim(self, message_id): - """Gets minimal claim doc for a message. - - :returns: {'id': cid, 'expires': ts} IFF the message is claimed, - and that claim has not expired. - """ - - claim = self._client.hmget(message_id, 'c', 'c.e') - - if claim == [None, None]: - # NOTE(kgriffs): message_id was not found - return None - - info = { - # NOTE(kgriffs): A "None" claim is serialized as an empty str - 'id': encodeutils.safe_decode(claim[0]) or None, - 'expires': int(claim[1]), - } - - # Is the message claimed? - now = timeutils.utcnow_ts() - if info['id'] and (now < info['expires']): - return info - - # Not claimed - return None - - def _list(self, queue, project=None, marker=None, - limit=storage.DEFAULT_MESSAGES_PER_PAGE, - echo=False, client_uuid=None, - include_claimed=False, - to_basic=True): - - if not self._queue_ctrl.exists(queue, project): - raise errors.QueueDoesNotExist(queue, - project) - - msgset_key = utils.msgset_key(queue, project) - client = self._client - - if not marker and not include_claimed: - # NOTE(kgriffs): Skip claimed messages at the head - # of the queue; otherwise we would just filter them all - # out and likely end up with an empty list to return. 
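            # Illustrative walk (hypothetical queue): if the three
            # lowest-ranked messages are claimed and the fourth is free,
            # _find_first_unclaimed returns the fourth ID, so paging
            # starts there instead of filtering out the claimed head on
            # every request.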
- marker = self._find_first_unclaimed(queue, project, limit) - start = client.zrank(msgset_key, marker) or 0 - else: - rank = client.zrank(msgset_key, marker) - start = rank + 1 if rank else 0 - - message_ids = client.zrange(msgset_key, start, - start + (limit - 1)) - - messages = Message.from_redis_bulk(message_ids, client) - - # NOTE(prashanthr_): Build a list of filters for checking - # the following: - # - # 1. Message is expired - # 2. Message is claimed - # 3. Message should not be echoed - # - now = timeutils.utcnow_ts() - filters = [functools.partial(utils.msg_expired_filter, now=now)] - - if not include_claimed: - filters.append(functools.partial(utils.msg_claimed_filter, - now=now)) - - if not echo: - filters.append(functools.partial(utils.msg_echo_filter, - client_uuid=client_uuid)) - - marker = {} - - yield _filter_messages(messages, filters, to_basic, marker) - yield marker['next'] - - @utils.raises_conn_error - @utils.retries_on_connection_error - def gc(self): - """Garbage-collect expired message data. - - Not all message data can be automatically expired. This method - cleans up the remainder. - - :returns: Number of messages removed - """ - claim_ctrl = self.driver.claim_controller - client = self._client - - num_removed = 0 - offset_msgsets = 0 - - while True: - # NOTE(kgriffs): Iterate across all message sets; there will - # be one set of message IDs per queue. - msgset_keys = client.zrange(MSGSET_INDEX_KEY, - offset_msgsets, - offset_msgsets + GC_BATCH_SIZE - 1) - if not msgset_keys: - break - - offset_msgsets += len(msgset_keys) - - for msgset_key in msgset_keys: - msgset_key = encodeutils.safe_decode(msgset_key) - - # NOTE(kgriffs): Drive the claim controller GC from - # here, because we already know the queue and project - # scope. - queue, project = utils.descope_message_ids_set(msgset_key) - claim_ctrl._gc(queue, project) - - offset_mids = 0 - - while True: - # NOTE(kgriffs): Look up each message in the message set, - # see if it has expired, and if so, remove it from msgset. - mids = client.zrange(msgset_key, offset_mids, - offset_mids + GC_BATCH_SIZE - 1) - - if not mids: - break - - offset_mids += len(mids) - - # NOTE(kgriffs): If redis expired the message, it will - # not exist, so all we have to do is remove mid from - # the msgset collection. 
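                    # Sketch of the two-phase batching used below: one
                    # pipeline issues EXISTS for every message ID in the
                    # batch, then a second pipeline ZREMs only the IDs whose
                    # hashes have already expired, so each batch costs two
                    # round trips.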
- with client.pipeline() as pipe: - for mid in mids: - pipe.exists(mid) - - mid_exists_flags = pipe.execute() - - with client.pipeline() as pipe: - for mid, exists in zip(mids, mid_exists_flags): - if not exists: - pipe.zrem(msgset_key, mid) - num_removed += 1 - - pipe.execute() - - return num_removed - - @utils.raises_conn_error - @utils.retries_on_connection_error - def list(self, queue, project=None, marker=None, - limit=storage.DEFAULT_MESSAGES_PER_PAGE, - echo=False, client_uuid=None, - include_claimed=False): - - return self._list(queue, project, marker, limit, echo, - client_uuid, include_claimed) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def first(self, queue, project=None, sort=1): - if sort not in (1, -1): - raise ValueError(u'sort must be either 1 (ascending) ' - u'or -1 (descending)') - - message_id = self._get_first_message_id(queue, project, sort) - if not message_id: - raise errors.QueueIsEmpty(queue, project) - - message = Message.from_redis(message_id, self._client) - if message is None: - raise errors.QueueIsEmpty(queue, project) - - now = timeutils.utcnow_ts() - return message.to_basic(now, include_created=True) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def get(self, queue, message_id, project=None): - if not self._queue_ctrl.exists(queue, project): - raise errors.QueueDoesNotExist(queue, project) - - message = Message.from_redis(message_id, self._client) - now = timeutils.utcnow_ts() - - if message and not utils.msg_expired_filter(message, now): - return message.to_basic(now) - else: - raise errors.MessageDoesNotExist(message_id, queue, project) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def bulk_get(self, queue, message_ids, project=None): - if not self._queue_ctrl.exists(queue, project): - return iter([]) - - # NOTE(prashanthr_): Pipelining is used here purely - # for performance. - with self._client.pipeline() as pipe: - for mid in message_ids: - pipe.hgetall(mid) - - messages = pipe.execute() - - # NOTE(kgriffs): Skip messages that may have been deleted - now = timeutils.utcnow_ts() - return (Message.from_hmap(msg).to_basic(now) - for msg in messages if msg) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def post(self, queue, messages, client_uuid, project=None): - msgset_key = utils.msgset_key(queue, project) - counter_key = utils.scope_queue_index(queue, project, - MESSAGE_RANK_COUNTER_SUFFIX) - - message_ids = [] - now = timeutils.utcnow_ts() - - with self._client.pipeline() as pipe: - for msg in messages: - prepared_msg = Message( - ttl=msg['ttl'], - created=now, - client_uuid=client_uuid, - claim_id=None, - claim_expires=now, - body=msg.get('body', {}), - ) - - prepared_msg.to_redis(pipe) - message_ids.append(prepared_msg.id) - - pipe.execute() - - # NOTE(kgriffs): If this call fails, we will return - # an error to the client and the messages will be - # orphaned, but Redis will remove them when they - # expire, so we will just pretend they don't exist - # in that case. - self._index_messages(msgset_key, counter_key, message_ids) - - return message_ids - - @utils.raises_conn_error - @utils.retries_on_connection_error - def delete(self, queue, message_id, project=None, claim=None): - claim_ctrl = self.driver.claim_controller - if not self._queue_ctrl.exists(queue, project): - return - - # NOTE(kgriffs): The message does not exist, so - # it is essentially "already" deleted. 
-        if not self._exists(message_id):
-            return
-
-        # TODO(kgriffs): Create decorator for validating claim and message
-        # IDs, since those are not checked at the transport layer. This
-        # decorator should be applied to all relevant methods.
-        if claim is not None:
-            try:
-                uuid.UUID(claim)
-            except ValueError:
-                raise errors.ClaimDoesNotExist(claim, queue, project)
-
-        msg_claim = self._get_claim(message_id)
-        is_claimed = (msg_claim is not None)
-
-        # Authorize the request based on having the correct claim ID
-        if claim is None:
-            if is_claimed:
-                raise errors.MessageIsClaimed(message_id)
-
-        elif not is_claimed:
-            raise errors.MessageNotClaimed(message_id)
-
-        elif msg_claim['id'] != claim:
-            if not claim_ctrl._exists(queue, claim, project):
-                raise errors.ClaimDoesNotExist(claim, queue, project)
-
-            raise errors.MessageNotClaimedBy(message_id, claim)
-
-        msgset_key = utils.msgset_key(queue, project)
-
-        with self._client.pipeline() as pipe:
-            pipe.delete(message_id)
-            pipe.zrem(msgset_key, message_id)
-
-            if is_claimed:
-                claim_ctrl._del_message(queue, project, msg_claim['id'],
-                                        message_id, pipe)
-
-            pipe.execute()
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def bulk_delete(self, queue, message_ids, project=None):
-        claim_ctrl = self.driver.claim_controller
-        if not self._queue_ctrl.exists(queue, project):
-            return
-
-        msgset_key = utils.msgset_key(queue, project)
-
-        with self._client.pipeline() as pipe:
-            for mid in message_ids:
-                if not self._exists(mid):
-                    continue
-
-                pipe.delete(mid)
-                pipe.zrem(msgset_key, mid)
-
-                msg_claim = self._get_claim(mid)
-                if msg_claim is not None:
-                    claim_ctrl._del_message(queue, project, msg_claim['id'],
-                                            mid, pipe)
-            pipe.execute()
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def pop(self, queue, limit, project=None):
-        # Pop is implemented as a chain of the following operations:
-        # 1. Create a claim.
-        # 2. Delete the messages claimed.
-        # 3. Delete the claim.
-        claim_ctrl = self.driver.claim_controller
-        claim_id, messages = claim_ctrl.create(
-            queue, dict(ttl=1, grace=0), project, limit=limit)
-
-        message_ids = [message['id'] for message in messages]
-        self.bulk_delete(queue, message_ids, project)
-        # NOTE(prashanthr_): Creating a claim controller reference
-        # causes a recursive reference. Hence, using the reference
-        # from the driver.
-        claim_ctrl.delete(queue, claim_id, project)
-        return messages
-
-
-def _filter_messages(messages, filters, to_basic, marker):
-    """Create a filtering iterator over a list of messages.
-
-    The function applies each of the given filters to a message before
-    the message can be included as part of the reply.
-    """
-    now = timeutils.utcnow_ts()
-
-    for msg in messages:
-        # NOTE(kgriffs): Message may have been deleted, so
-        # check each value to ensure we got a message back
-        if msg is None:
-            continue
-
-        # NOTE(kgriffs): Check to see if any of the filters
-        # indicate that this message should be skipped.
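        # Worked example (hypothetical message): a live, unclaimed message
        # posted by another client matches none of the expired, claimed, or
        # echo filters, so the for/else below records it as the next marker
        # and yields it.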
- for should_skip in filters: - if should_skip(msg): - break - else: - marker['next'] = msg.id - - if to_basic: - yield msg.to_basic(now) - else: - yield msg - -QUEUES_SET_STORE_NAME = 'queues_set' - - -class MessageQueueHandler(object): - def __init__(self, driver, control_driver): - self.driver = driver - self._client = self.driver.connection - self._queue_ctrl = self.driver.queue_controller - self._message_ctrl = self.driver.message_controller - self._claim_ctrl = self.driver.claim_controller - - @utils.raises_conn_error - def create(self, name, metadata=None, project=None): - with self._client.pipeline() as pipe: - self._message_ctrl._create_msgset(name, project, pipe) - - try: - pipe.execute() - except redis.exceptions.ResponseError: - return False - - @utils.raises_conn_error - @utils.retries_on_connection_error - def delete(self, name, project=None): - with self._client.pipeline() as pipe: - self._message_ctrl._delete_msgset(name, project, pipe) - self._message_ctrl._delete_queue_messages(name, project, pipe) - pipe.execute() - - @utils.raises_conn_error - @utils.retries_on_connection_error - def stats(self, name, project=None): - if not self._queue_ctrl.exists(name, project=project): - raise errors.QueueDoesNotExist(name, project) - - total = self._message_ctrl._count(name, project) - - if total: - claimed = self._claim_ctrl._count_messages(name, project) - else: - claimed = 0 - - message_stats = { - 'claimed': claimed, - 'free': total - claimed, - 'total': total, - } - - if total: - try: - newest = self._message_ctrl.first(name, project, -1) - oldest = self._message_ctrl.first(name, project, 1) - except errors.QueueIsEmpty: - pass - else: - message_stats['newest'] = newest - message_stats['oldest'] = oldest - - return {'messages': message_stats} diff --git a/zaqar/storage/redis/models.py b/zaqar/storage/redis/models.py deleted file mode 100644 index 77aefb05..00000000 --- a/zaqar/storage/redis/models.py +++ /dev/null @@ -1,320 +0,0 @@ -# Copyright (c) 2014 Prashanth Raghu. -# Copyright (c) 2015 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import uuid - -import msgpack -from oslo_utils import encodeutils -from oslo_utils import timeutils -from oslo_utils import uuidutils - -MSGENV_FIELD_KEYS = (b'id', b't', b'cr', b'e', b'u', b'c', b'c.e') -SUBENV_FIELD_KEYS = (b'id', b's', b'u', b't', b'e', b'o', b'p', b'c') - - -# TODO(kgriffs): Make similar classes for claims and queues -class MessageEnvelope(object): - """Encapsulates the message envelope (metadata only, no body). - - :param id: Message ID in the form of a hexadecimal UUID. If not - given, one will be automatically generated. - :param ttl: Message TTL in seconds - :param created: Message creation time as a UNIX timestamp - :param client_uuid: UUID of the client that posted the message - :param claim_id: If claimed, the UUID of the claim. Set to None - for messages that have never been claimed. 
- :param claim_expires: Claim expiration as a UNIX timestamp - """ - - __slots__ = [ - 'id', - 'ttl', - 'created', - 'expires', - 'client_uuid', - 'claim_id', - 'claim_expires', - ] - - def __init__(self, **kwargs): - self.id = _validate_uuid4(kwargs.get('id', uuidutils.generate_uuid())) - self.ttl = kwargs['ttl'] - self.created = kwargs['created'] - self.expires = kwargs.get('expires', self.created + self.ttl) - - self.client_uuid = _validate_uuid4(str(kwargs['client_uuid'])) - - self.claim_id = kwargs.get('claim_id') - if self.claim_id: - _validate_uuid4(self.claim_id) - self.claim_expires = kwargs['claim_expires'] - - @staticmethod - def from_hmap(hmap): - kwargs = _hmap_to_msgenv_kwargs(hmap) - return MessageEnvelope(**kwargs) - - @staticmethod - def from_redis(mid, client): - values = client.hmget(mid, MSGENV_FIELD_KEYS) - - # NOTE(kgriffs): If the key does not exist, redis-py returns - # an array of None values. - if values[0] is None: - return None - - return _hmap_kv_to_msgenv(MSGENV_FIELD_KEYS, values) - - @staticmethod - def from_redis_bulk(message_ids, client): - with client.pipeline() as pipe: - for mid in message_ids: - pipe.hmget(mid, MSGENV_FIELD_KEYS) - - results = pipe.execute() - - message_envs = [] - for value_list in results: - if value_list is None: - env = None - else: - env = _hmap_kv_to_msgenv(MSGENV_FIELD_KEYS, value_list) - - message_envs.append(env) - - return message_envs - - def to_redis(self, pipe): - hmap = _msgenv_to_hmap(self) - - pipe.hmset(self.id, hmap) - pipe.expire(self.id, self.ttl) - - -class SubscriptionEnvelope(object): - """Encapsulates the subscription envelope.""" - - __slots__ = [ - 'id', - 'source', - 'subscriber', - 'ttl', - 'expires', - 'options', - 'project', - 'confirmed', - ] - - def __init__(self, **kwargs): - self.id = kwargs.get('id', uuidutils.generate_uuid()) - self.source = kwargs['source'] - self.subscriber = kwargs['subscriber'] - self.ttl = kwargs['ttl'] - self.expires = kwargs.get('expires', float('inf')) - self.options = kwargs['options'] - self.confirmed = kwargs.get('confirmed', 'True') - - @staticmethod - def from_redis(sid, client): - values = client.hmget(sid, SUBENV_FIELD_KEYS) - - # NOTE(kgriffs): If the key does not exist, redis-py returns - # an array of None values. - if values[0] is None: - return None - - return _hmap_kv_to_subenv(SUBENV_FIELD_KEYS, values) - - def to_redis(self, pipe): - hmap = _subenv_to_hmap(self) - - pipe.hmset(self.id, hmap) - pipe.expire(self.id, self.ttl) - - def to_basic(self, now): - created = self.expires - self.ttl - is_confirmed = self.confirmed == str(True) - basic_msg = { - 'id': self.id, - 'source': self.source, - 'subscriber': self.subscriber, - 'ttl': self.ttl, - 'age': now - created, - 'options': self.options, - 'confirmed': is_confirmed, - } - - return basic_msg - - -# NOTE(kgriffs): This could have implemented MessageEnvelope functionality -# by adding an "include_body" param to all the methods, but then you end -# up with tons of if statements that make the code rather ugly. -class Message(MessageEnvelope): - """Represents an entire message, including envelope and body. - - :param id: Message ID in the form of a hexadecimal UUID. If not - given, one will be automatically generated. - :param ttl: Message TTL in seconds - :param created: Message creation time as a UNIX timestamp - :param client_uuid: UUID of the client that posted the message - :param claim_id: If claimed, the UUID of the claim. Set to None - for messages that have never been claimed. 
-    :param claim_expires: Claim expiration as a UNIX timestamp
-    :param body: Message payload. Must be serializable to msgpack.
-    """
-
-    __slots__ = MessageEnvelope.__slots__ + ['body']
-
-    def __init__(self, **kwargs):
-        super(Message, self).__init__(**kwargs)
-        self.body = kwargs['body']
-
-    @staticmethod
-    def from_hmap(hmap):
-        kwargs = _hmap_to_msgenv_kwargs(hmap)
-        kwargs['body'] = _unpack(hmap[b'b'])
-
-        return Message(**kwargs)
-
-    @staticmethod
-    def from_redis(mid, client):
-        hmap = client.hgetall(mid)
-        return Message.from_hmap(hmap) if hmap else None
-
-    @staticmethod
-    def from_redis_bulk(message_ids, client):
-        with client.pipeline() as pipe:
-            for mid in message_ids:
-                pipe.hgetall(mid)
-
-            results = pipe.execute()
-
-        messages = [Message.from_hmap(hmap) if hmap else None
-                    for hmap in results]
-
-        return messages
-
-    def to_redis(self, pipe, include_body=True):
-        if not include_body:
-            super(Message, self).to_redis(pipe)
-
-        hmap = _msgenv_to_hmap(self)
-        hmap['b'] = _pack(self.body)
-
-        pipe.hmset(self.id, hmap)
-        pipe.expire(self.id, self.ttl)
-
-    def to_basic(self, now, include_created=False):
-        basic_msg = {
-            'id': self.id,
-            'age': now - self.created,
-            'ttl': self.ttl,
-            'body': self.body,
-            'claim_id': self.claim_id,
-        }
-
-        if include_created:
-            created_iso = timeutils.iso8601_from_timestamp(self.created)
-            basic_msg['created'] = created_iso
-
-        return basic_msg
-
-
-# ==========================================================================
-# Helpers
-# ==========================================================================
-
-
-_pack = msgpack.Packer(encoding='utf-8', use_bin_type=True).pack
-_unpack = functools.partial(msgpack.unpackb, encoding='utf-8')
-
-
-def _hmap_kv_to_msgenv(keys, values):
-    hmap = dict(zip(keys, values))
-    kwargs = _hmap_to_msgenv_kwargs(hmap)
-    return MessageEnvelope(**kwargs)
-
-
-def _hmap_to_msgenv_kwargs(hmap):
-    claim_id = hmap[b'c']
-    if claim_id:
-        claim_id = encodeutils.safe_decode(claim_id)
-    else:
-        claim_id = None
-
-    # NOTE(kgriffs): Under Py3K, redis-py converts all strings
-    # into binary. Woohoo!
-    return {
-        'id': encodeutils.safe_decode(hmap[b'id']),
-        'ttl': int(hmap[b't']),
-        'created': int(hmap[b'cr']),
-        'expires': int(hmap[b'e']),
-
-        'client_uuid': encodeutils.safe_decode(hmap[b'u']),
-
-        'claim_id': claim_id,
-        'claim_expires': int(hmap[b'c.e']),
-    }
-
-
-def _msgenv_to_hmap(msg):
-    return {
-        'id': msg.id,
-        't': msg.ttl,
-        'cr': msg.created,
-        'e': msg.expires,
-        'u': msg.client_uuid,
-        'c': msg.claim_id or '',
-        'c.e': msg.claim_expires,
-    }
-
-
-def _hmap_kv_to_subenv(keys, values):
-    hmap = dict(zip(keys, values))
-    kwargs = _hmap_to_subenv_kwargs(hmap)
-    return SubscriptionEnvelope(**kwargs)
-
-
-def _hmap_to_subenv_kwargs(hmap):
-    # NOTE(kgriffs): Under Py3K, redis-py converts all strings
-    # into binary. Woohoo!
-    return {
-        'id': encodeutils.safe_decode(hmap[b'id']),
-        'source': hmap[b's'],
-        'subscriber': hmap[b'u'],
-        'ttl': int(hmap[b't']),
-        'expires': int(hmap[b'e']),
-        'options': _unpack(hmap[b'o']),
-        'confirmed': hmap[b'c']
-    }
-
-
-def _subenv_to_hmap(msg):
-    return {
-        'id': msg.id,
-        's': msg.source,
-        'u': msg.subscriber,
-        't': msg.ttl,
-        'e': msg.expires,
-        'o': msg.options
-    }
-
-
-def _validate_uuid4(_uuid):
-    uuid.UUID(str(_uuid), version=4)
-    return _uuid
diff --git a/zaqar/storage/redis/options.py b/zaqar/storage/redis/options.py
deleted file mode 100644
index fff99080..00000000
--- a/zaqar/storage/redis/options.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2014 Prashanth Raghu.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Redis storage driver configuration options."""
-
-from oslo_config import cfg
-
-_deprecated_group = 'drivers:storage:redis'
-# options common to management and message storage
-_COMMON_REDIS_OPTIONS = (
-    cfg.StrOpt('uri', default="redis://127.0.0.1:6379",
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'uri',
-                   group=_deprecated_group), ],
-               help=('Redis connection URI, taking one of three forms. '
-                     'For a direct connection to a Redis server, use '
-                     'the form "redis://host[:port][?options]", where '
-                     'port defaults to 6379 if not specified. For an '
-                     'HA master-slave Redis cluster using Redis Sentinel, '
-                     'use the form "redis://host1[:port1]'
-                     '[,host2[:port2],...,hostN[:portN]][?options]", '
-                     'where each host specified corresponds to an '
-                     'instance of redis-sentinel. In this form, the '
-                     'name of the Redis master used in the Sentinel '
-                     'configuration must be included in the query '
-                     'string as "master=<name>". Finally, to connect '
-                     'to a local instance of Redis over a unix socket, '
-                     'you may use the form '
-                     '"redis:/path/to/redis.sock[?options]". In all '
-                     'forms, the "socket_timeout" option may be '
-                     'specified in the query string. Its value is '
-                     'given in seconds. If not provided, '
-                     '"socket_timeout" defaults to 0.1 seconds.')),
-
-    cfg.IntOpt('max_reconnect_attempts', default=10,
-               deprecated_opts=[cfg.DeprecatedOpt(
-                   'max_reconnect_attempts',
-                   group=_deprecated_group), ],
-               help=('Maximum number of times to retry an operation that '
-                     'failed due to a redis node failover.')),
-
-    cfg.FloatOpt('reconnect_sleep', default=1.0,
-                 deprecated_opts=[cfg.DeprecatedOpt(
-                     'reconnect_sleep',
-                     group=_deprecated_group), ],
-                 help=('Base sleep interval between attempts to reconnect '
-                       'after a redis node failover.'))
-
-)
-
-MANAGEMENT_REDIS_OPTIONS = _COMMON_REDIS_OPTIONS
-MESSAGE_REDIS_OPTIONS = _COMMON_REDIS_OPTIONS
-
-MANAGEMENT_REDIS_GROUP = 'drivers:management_store:redis'
-MESSAGE_REDIS_GROUP = 'drivers:message_store:redis'
-
-
-def _config_options():
-    return [(MANAGEMENT_REDIS_GROUP, MANAGEMENT_REDIS_OPTIONS),
-            (MESSAGE_REDIS_GROUP, MESSAGE_REDIS_OPTIONS)]
diff --git a/zaqar/storage/redis/queues.py b/zaqar/storage/redis/queues.py
deleted file mode 100644
index 1af454e9..00000000
--- a/zaqar/storage/redis/queues.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright (c) 2014 Prashanth Raghu.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-
-import msgpack
-from oslo_utils import timeutils
-import redis
-
-from zaqar.common import decorators
-from zaqar import storage
-from zaqar.storage import errors
-from zaqar.storage.redis import utils
-
-QUEUES_SET_STORE_NAME = 'queues_set'
-MESSAGE_IDS_SUFFIX = 'messages'
-
-
-class QueueController(storage.Queue):
-    """Implements queue resource operations using Redis.
-
-    Queues are scoped by project, which is prefixed to the
-    queue name.
-
-    Redis Data Structures:
-
-    1. Queue Index (Redis sorted set):
-
-        Set of all queues for the given project, ordered by name.
-
-        Key: <project_id>.queues_set
-
-        +--------+-----------------------------+
-        |  Id    |  Value                      |
-        +========+=============================+
-        |  name  |  <project_id>.<queue_name>  |
-        +--------+-----------------------------+
-
-    2. Queue Information (Redis hash):
-
-        Key: <project_id>.<queue_name>
-
-        +----------------------+---------+
-        |  Name                |  Field  |
-        +======================+=========+
-        |  metadata            |  m      |
-        +----------------------+---------+
-        |  creation timestamp  |  t      |
-        +----------------------+---------+
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(QueueController, self).__init__(*args, **kwargs)
-        self._client = self.driver.connection
-        self._packer = msgpack.Packer(encoding='utf-8',
-                                      use_bin_type=True).pack
-        self._unpacker = functools.partial(msgpack.unpackb, encoding='utf-8')
-
-    @decorators.lazy_property(write=False)
-    def _claim_ctrl(self):
-        return self.driver.claim_controller
-
-    @decorators.lazy_property(write=False)
-    def _subscription_ctrl(self):
-        return self.driver.subscription_controller
-
-    def _get_queue_info(self, queue_key, fields, transform=str):
-        """Get one or more fields from Queue Info."""
-
-        values = self._client.hmget(queue_key, fields)
-        return [transform(v) for v in values] if transform else values
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def _list(self, project=None, marker=None,
-              limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False):
-        client = self._client
-        qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)
-        marker = utils.scope_queue_name(marker, project)
-        rank = client.zrank(qset_key, marker)
-        start = rank + 1 if rank else 0
-
-        cursor = (q for q in client.zrange(qset_key, start,
-                                           start + limit - 1))
-        marker_next = {}
-
-        def denormalizer(info, name):
-            queue = {'name': utils.descope_queue_name(name)}
-            marker_next['next'] = queue['name']
-            if detailed:
-                queue['metadata'] = info[1]
-
-            return queue
-
-        yield utils.QueueListCursor(self._client, cursor, denormalizer)
-        yield marker_next and marker_next['next']
-
-    def _get(self, name, project=None):
-        """Obtain the metadata from the queue."""
-        try:
-            return self.get_metadata(name, project)
-        except errors.QueueDoesNotExist:
-            return {}
-
-    @utils.raises_conn_error
-    def _create(self, name, metadata=None, project=None):
-        # TODO(prashanthr_): Implement as a lua script.
-        queue_key = utils.scope_queue_name(name, project)
-        qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)
-
-        # Check if the queue already exists.
-        if self._exists(name, project):
-            return False
-
-        queue = {
-            'c': 0,
-            'cl': 0,
-            'm': self._packer(metadata or {}),
-            't': timeutils.utcnow_ts()
-        }
-
-        # Pipeline ensures atomic inserts.
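        # Illustrative end state (hypothetical project 'p1', queue 'q1'):
        # the sorted set 'p1.queues_set' gains the member 'p1.q1', and the
        # hash 'p1.q1' stores the packed metadata under field 'm' plus the
        # creation timestamp under 't'.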
- with self._client.pipeline() as pipe: - pipe.zadd(qset_key, 1, queue_key).hmset(queue_key, queue) - - try: - pipe.execute() - except redis.exceptions.ResponseError: - return False - - return True - - @utils.raises_conn_error - @utils.retries_on_connection_error - def _exists(self, name, project=None): - # TODO(prashanthr_): Cache this lookup - queue_key = utils.scope_queue_name(name, project) - qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project) - - return self._client.zrank(qset_key, queue_key) is not None - - @utils.raises_conn_error - @utils.retries_on_connection_error - def set_metadata(self, name, metadata, project=None): - if not self.exists(name, project): - raise errors.QueueDoesNotExist(name, project) - - key = utils.scope_queue_name(name, project) - fields = {'m': self._packer(metadata)} - - self._client.hmset(key, fields) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def get_metadata(self, name, project=None): - if not self.exists(name, project): - raise errors.QueueDoesNotExist(name, project) - - queue_key = utils.scope_queue_name(name, project) - metadata = self._get_queue_info(queue_key, b'm', None)[0] - - return self._unpacker(metadata) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def _delete(self, name, project=None): - queue_key = utils.scope_queue_name(name, project) - qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project) - - # NOTE(prashanthr_): Pipelining is used to mitigate race conditions - with self._client.pipeline() as pipe: - pipe.zrem(qset_key, queue_key) - pipe.delete(queue_key) - pipe.execute() - - @utils.raises_conn_error - @utils.retries_on_connection_error - def _stats(self, name, project=None): - pass diff --git a/zaqar/storage/redis/scripting.py b/zaqar/storage/redis/scripting.py deleted file mode 100644 index af6b6f4e..00000000 --- a/zaqar/storage/redis/scripting.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) 2014 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from zaqar.common import decorators - - -class Mixin(object): - script_names = [] - - @decorators.lazy_property(write=False) - def _scripts(self): - scripts = {} - - for name in self.script_names: - script = _read_script(name) - scripts[name] = self._client.register_script(script) - - return scripts - - -def _read_script(script_name): - folder = os.path.abspath(os.path.dirname(__file__)) - filename = os.path.join(folder, 'scripts', script_name + '.lua') - - with open(filename, 'r') as script_file: - return script_file.read() diff --git a/zaqar/storage/redis/scripts/claim_messages.lua b/zaqar/storage/redis/scripts/claim_messages.lua deleted file mode 100644 index 8dffb198..00000000 --- a/zaqar/storage/redis/scripts/claim_messages.lua +++ /dev/null @@ -1,101 +0,0 @@ ---[[ - -Copyright (c) 2014 Rackspace Hosting, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
---]]
-
--- Read params
-local msgset_key = KEYS[1]
-
-local now = tonumber(ARGV[1])
-local limit = tonumber(ARGV[2])
-local claim_id = ARGV[3]
-local claim_expires = tonumber(ARGV[4])
-local msg_ttl = tonumber(ARGV[5])
-local msg_expires = tonumber(ARGV[6])
-
--- Scan for up to 'limit' unclaimed messages
-local BATCH_SIZE = 100
-
-local start = 0
-local claimed_msgs = {}
-local msg_ids_to_cleanup = {}
-
-local found_unclaimed = false
-
-while (#claimed_msgs < limit) do
-    local stop = (start + BATCH_SIZE - 1)
-    local msg_ids = redis.call('ZRANGE', msgset_key, start, stop)
-
-    if (#msg_ids == 0) then
-        break
-    end
-
-    start = start + BATCH_SIZE
-
-    -- TODO(kgriffs): Try moving claimed IDs to a different set
-    -- to avoid scanning through already-claimed messages.
-    for i, mid in ipairs(msg_ids) do
-        -- NOTE(kgriffs): Since execution of this script cannot
-        -- happen in parallel, once we find the first unclaimed
-        -- message, the remaining messages will always be
-        -- unclaimed as well.
-
-        if not found_unclaimed then
-            local msg = redis.call('HMGET', mid, 'c', 'c.e')
-            if msg[1] == false and msg[2] == false then
-                -- NOTE(Eva-i): It means the message expired and does not
-                -- actually exist anymore; we must later garbage collect its
-                -- ID from the set and move on.
-                msg_ids_to_cleanup[#msg_ids_to_cleanup + 1] = mid
-            elseif msg[1] == '' or tonumber(msg[2]) <= now then
-                found_unclaimed = true
-            end
-        end
-
-        if found_unclaimed then
-            -- Found an unclaimed message, so claim it.
-            local msg_expires_prev = redis.call('HGET', mid, 'e')
-            if msg_expires_prev ~= false then
-                -- NOTE(Eva-i): Condition above means the message is not
-                -- expired and we really can claim it.
-                redis.call('HMSET', mid,
-                           'c', claim_id,
-                           'c.e', claim_expires)
-
-                -- Will the message expire early?
-                if tonumber(msg_expires_prev) < claim_expires then
-                    redis.call('HMSET', mid,
-                               't', msg_ttl,
-                               'e', msg_expires)
-                end
-
-                claimed_msgs[#claimed_msgs + 1] = mid
-
-                if (#claimed_msgs == limit) then
-                    break
-                end
-            end
-        end
-    end
-end
-
-if (#msg_ids_to_cleanup ~= 0) then
-    -- Garbage collect expired message IDs stored in msgset_key.
-    redis.call('ZREM', msgset_key, unpack(msg_ids_to_cleanup))
-end
-
-return claimed_msgs
diff --git a/zaqar/storage/redis/scripts/index_messages.lua b/zaqar/storage/redis/scripts/index_messages.lua
deleted file mode 100644
index 95538375..00000000
--- a/zaqar/storage/redis/scripts/index_messages.lua
+++ /dev/null
@@ -1,39 +0,0 @@
---[[
-
-Copyright (c) 2014 Rackspace Hosting, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/zaqar/storage/redis/scripts/index_messages.lua b/zaqar/storage/redis/scripts/index_messages.lua
deleted file mode 100644
index 95538375..00000000
--- a/zaqar/storage/redis/scripts/index_messages.lua
+++ /dev/null
@@ -1,39 +0,0 @@
---[[
-
-Copyright (c) 2014 Rackspace Hosting, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
---]]
-
--- Read params
-local msgset_key = KEYS[1]
-local counter_key = KEYS[2]
-
-local num_message_ids = tonumber(ARGV[1])
-
--- Get next rank value
-local rank_counter = tonumber(redis.call('GET', counter_key) or 1)
-
--- Add ranked message IDs
-local zadd_args = {'ZADD', msgset_key}
-for i = 0, (num_message_ids - 1) do
-    zadd_args[#zadd_args+1] = rank_counter + i
-    zadd_args[#zadd_args+1] = ARGV[2 + i]
-end
-
-redis.call(unpack(zadd_args))
-
--- Set next rank value
-return redis.call('SET', counter_key, rank_counter + num_message_ids)
diff --git a/zaqar/storage/redis/subscriptions.py b/zaqar/storage/redis/subscriptions.py
deleted file mode 100644
index 934cb56d..00000000
--- a/zaqar/storage/redis/subscriptions.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright (c) 2015 Catalyst IT Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import functools
-
-import msgpack
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-import redis
-
-from zaqar.common import utils as common_utils
-from zaqar.storage import base
-from zaqar.storage import errors
-from zaqar.storage.redis import models
-from zaqar.storage.redis import utils
-
-
-SubscriptionEnvelope = models.SubscriptionEnvelope
-
-SUBSET_INDEX_KEY = 'subset_index'
-SUBSCRIPTION_IDS_SUFFIX = 'subscriptions'
-
-
-class SubscriptionController(base.Subscription):
-    """Implements subscription resource operations using Redis.
-
-    Subscriptions are unique by project + queue/topic + subscriber.
-
-    Schema:
-        's': source :: six.text_type
-        'u': subscriber :: six.text_type
-        't': ttl :: int
-        'e': expires :: int
-        'o': options :: dict
-        'p': project :: six.text_type
-    """
-    def __init__(self, *args, **kwargs):
-        super(SubscriptionController, self).__init__(*args, **kwargs)
-        self._client = self.driver.connection
-        self._packer = msgpack.Packer(encoding='utf-8',
-                                      use_bin_type=True).pack
-        self._unpacker = functools.partial(msgpack.unpackb, encoding='utf-8')
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def list(self, queue, project=None, marker=None, limit=10):
-        client = self._client
-        subset_key = utils.scope_subscription_ids_set(queue,
-                                                      project,
-                                                      SUBSCRIPTION_IDS_SUFFIX)
-        rank = client.zrank(subset_key, marker)
-        start = rank + 1 if rank is not None else 0
-
-        cursor = (q for q in client.zrange(subset_key, start,
-                                           start + limit - 1))
-        marker_next = {}
-
-        def denormalizer(record, sid):
-            now = timeutils.utcnow_ts()
-            ttl = int(record[2])
-            expires = int(record[3])
-            created = expires - ttl
-            is_confirmed = True
-            if len(record) == 6:
-                is_confirmed = record[5] == str(True)
-            ret = {
-                'id': sid,
-                'source': record[0],
-                'subscriber': record[1],
-                'ttl': ttl,
-                'age': now - created,
-                'options': self._unpacker(record[4]),
-                'confirmed': is_confirmed,
-            }
-            marker_next['next'] = sid
-
-            return ret
-
-        yield utils.SubscriptionListCursor(self._client, cursor, denormalizer)
-        yield marker_next and marker_next['next']
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def get(self, queue, subscription_id, project=None):
-        subscription = None
-        if self.exists(queue, subscription_id, project):
-            subscription = SubscriptionEnvelope.from_redis(subscription_id,
-                                                           self._client)
-        if subscription:
-            now = timeutils.utcnow_ts()
-            return subscription.to_basic(now)
-        else:
-            raise errors.SubscriptionDoesNotExist(subscription_id)
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def create(self, queue, subscriber, ttl, options, project=None):
-        subscription_id = uuidutils.generate_uuid()
-        subset_key = utils.scope_subscription_ids_set(queue,
-                                                      project,
-                                                      SUBSCRIPTION_IDS_SUFFIX)
-
-        source = queue
-        now = timeutils.utcnow_ts()
-        expires = now + ttl
-        confirmed = False
-
-        subscription = {'id': subscription_id,
-                        's': source,
-                        'u': subscriber,
-                        't': ttl,
-                        'e': expires,
-                        'o': self._packer(options),
-                        'p': project,
-                        'c': confirmed}
-
-        try:
-            # Pipeline ensures atomic inserts.
-            with self._client.pipeline() as pipe:
-                if not self._is_duplicated_subscriber(subscriber,
-                                                      queue,
-                                                      project):
-                    pipe.zadd(subset_key, 1,
-                              subscription_id).hmset(subscription_id,
-                                                     subscription)
-                    pipe.expire(subscription_id, ttl)
-                    pipe.execute()
-                else:
-                    return None
-            return subscription_id
-        except redis.exceptions.ResponseError:
-            return None
-
-    def _is_duplicated_subscriber(self, subscriber, queue, project):
-        """Check whether the subscriber already exists.
-
-        Given the limitation of Redis' expires(), it's hard to auto-expire
-        the subscriber from the set and the subscription id from the sorted
-        set, so this method is used to do an ugly duplication check when
-        adding a new subscription so that we don't need the set for
-        subscribers. As a side effect, this method also removes any
-        unreachable subscription ids from the sorted set.
-        """
-        subset_key = utils.scope_subscription_ids_set(queue,
-                                                      project,
-                                                      SUBSCRIPTION_IDS_SUFFIX)
-        try:
-            sub_ids = (q for q in self._client.zrange(subset_key, 0, -1))
-            for s_id in sub_ids:
-                subscription = self._client.hmget(s_id,
-                                                  ['s', 'u', 't', 'o', 'c'])
-                if subscription == [None, None, None, None, None]:
-                    # NOTE(flwang): If every field comes back empty, the
                    # subscription hash has expired, but its id is still in
                    # the sorted set. Delete the stale id to clean up.
-                    self._client.zrem(subset_key, s_id)
-                if subscription[1] == subscriber:
-                    return True
-            return False
-        except redis.exceptions.ResponseError:
-            return True
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def exists(self, queue, subscription_id, project=None):
-        subset_key = utils.scope_subscription_ids_set(queue, project,
-                                                      SUBSCRIPTION_IDS_SUFFIX)
-
-        return self._client.zrank(subset_key, subscription_id) is not None
-
-    @utils.raises_conn_error
-    @utils.retries_on_connection_error
-    def update(self, queue, subscription_id, project=None, **kwargs):
-        names = ('subscriber', 'ttl', 'options')
-        key_transform = lambda x: 'u' if x == 'subscriber' else x[0]
-        fields = common_utils.fields(kwargs, names,
-                                     pred=lambda x: x is not None,
-                                     key_transform=key_transform)
-        assert fields, ('`subscriber`, `ttl`, '
-                        'or `options` not found in kwargs')
-
-        # Let's get our subscription by ID. If it does not exist,
-        # SubscriptionDoesNotExist error will be raised internally.
-        subscription_to_update = self.get(queue, subscription_id,
-                                          project=project)
-
-        new_subscriber = fields.get('u')
-
-        # Let's do some checks to prevent subscription duplication.
-        if new_subscriber:
-            # Check if 'new_subscriber' is really new for our subscription.
-            if subscription_to_update['subscriber'] != new_subscriber:
-                # It's new. We should raise an error if this subscriber
-                # already exists for the queue and project.
-                if self._is_duplicated_subscriber(new_subscriber, queue,
-                                                  project):
-                    raise errors.SubscriptionAlreadyExists()
-
-        # NOTE(Eva-i): if there are new options, we need to pack them before
-        # sending to the database.
-        new_options = fields.get('o')
-        if new_options is not None:
-            fields['o'] = self._packer(new_options)
-
-        new_ttl = fields.get('t')
-        if new_ttl is not None:
-            now = timeutils.utcnow_ts()
-            expires = now + new_ttl
-            fields['e'] = expires
-
-        # Pipeline ensures atomic inserts.
- with self._client.pipeline() as pipe: - pipe.hmset(subscription_id, fields) - if new_ttl is not None: - pipe.expire(subscription_id, new_ttl) - pipe.execute() - - @utils.raises_conn_error - @utils.retries_on_connection_error - def delete(self, queue, subscription_id, project=None): - subset_key = utils.scope_subscription_ids_set(queue, project, - SUBSCRIPTION_IDS_SUFFIX) - - if self._client.zrank(subset_key, subscription_id) is not None: - # NOTE(prashanthr_): Pipelining is used to mitigate race conditions - with self._client.pipeline() as pipe: - pipe.zrem(subset_key, subscription_id) - pipe.delete(subscription_id) - pipe.execute() - - @utils.raises_conn_error - @utils.retries_on_connection_error - def get_with_subscriber(self, queue, subscriber, project=None): - subset_key = utils.scope_subscription_ids_set(queue, - project, - SUBSCRIPTION_IDS_SUFFIX) - sub_ids = (q for q in self._client.zrange(subset_key, 0, -1)) - for s_id in sub_ids: - subscription = self._client.hmget(s_id, - ['s', 'u', 't', 'o', 'c']) - if subscription[1] == subscriber: - subscription = SubscriptionEnvelope.from_redis(s_id, - self._client) - now = timeutils.utcnow_ts() - return subscription.to_basic(now) - - @utils.raises_conn_error - @utils.retries_on_connection_error - def confirm(self, queue, subscription_id, project=None, confirmed=True): - # Let's get our subscription by ID. If it does not exist, - # SubscriptionDoesNotExist error will be raised internally. - self.get(queue, subscription_id, project=project) - - fields = {'c': confirmed} - with self._client.pipeline() as pipe: - pipe.hmset(subscription_id, fields) - pipe.execute() diff --git a/zaqar/storage/redis/utils.py b/zaqar/storage/redis/utils.py deleted file mode 100644 index 6759b6f3..00000000 --- a/zaqar/storage/redis/utils.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright (c) 2014 Prashanth Raghu. -# Copyright (c) 2015 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import sys -import time - -from oslo_log import log as logging -from oslo_utils import encodeutils -import redis -import six - -from zaqar.storage import errors - -LOG = logging.getLogger(__name__) -MESSAGE_IDS_SUFFIX = 'messages' -SUBSCRIPTION_IDS_SUFFIX = 'subscriptions' - - -def descope_queue_name(scoped_name): - """Descope Queue name with '.'. - - Returns the queue name from the scoped name - which is of the form project-id.queue-name - """ - - return scoped_name.split('.')[1] - - -def normalize_none_str(string_or_none): - """Returns '' IFF given value is None, passthrough otherwise. - - This function normalizes None to the empty string to facilitate - string concatenation when a variable could be None. - """ - - # TODO(prashanthr_) : Try to reuse this utility. Violates DRY - return '' if string_or_none is None else string_or_none - - -def scope_queue_name(queue=None, project=None): - """Returns a scoped name for a queue based on project and queue. 
-
-    If only the project name is specified, a scope signifying "all queues"
-    for that project is returned. If neither queue nor project are
-    specified, a scope for "all global queues" is returned, which
-    is to be interpreted as excluding queues scoped by project.
-
-    :returns: '{project}.{queue}' if project and queue are given,
-        '{project}.' if ONLY project is given, '.{queue}' if ONLY
-        queue is given, and '.' if neither are given.
-    """
-
-    # TODO(prashanthr_) : Try to reuse this utility. Violates DRY
-    return normalize_none_str(project) + '.' + normalize_none_str(queue)
-
-# NOTE(prashanthr_): Alias the scope_queue_name function
-# to be used in the pools and claims controllers, as similar
-# functionality is required to scope Redis IDs.
-scope_pool_catalogue = scope_claim_messages = scope_queue_name
-
-
-def scope_message_ids_set(queue=None, project=None, message_suffix=''):
-    """Scope messages set with '.'
-
-    Returns a scoped name for the list of messages in the form
-    project-id.queue-name.suffix
-    """
-
-    return (normalize_none_str(project) + '.' +
-            normalize_none_str(queue) + '.' +
-            message_suffix)
-
-
-def descope_message_ids_set(msgset_key):
-    """Descope messages set with '.'
-
-    :returns: (queue, project)
-    """
-
-    tokens = msgset_key.split('.')
-
-    return tokens[1] or None, tokens[0] or None
-
-
-def scope_subscription_ids_set(queue=None, project=None,
-                               subscription_suffix=''):
-    """Scope subscriptions set with '.'
-
-    Returns a scoped name for the list of subscriptions in the form
-    project-id.queue-name.suffix
-    """
-
-    return (normalize_none_str(project) + '.' +
-            normalize_none_str(queue) + '.' +
-            subscription_suffix)
-
-
-def descope_subscription_ids_set(subset_key):
-    """Descope subscriptions set with '.'
-
-    :returns: (queue, project)
-    """
-
-    tokens = subset_key.split('.')
-
-    return (tokens[1] or None, tokens[0] or None)
-
-
-# NOTE(prashanthr_): Aliasing the scope_message_ids_set function
-# to be used in the pools and claims controllers, as similar
-# functionality is required to scope Redis IDs.
-scope_queue_catalogue = scope_claims_set = scope_message_ids_set
-scope_queue_index = scope_message_ids_set
-
-
-def msgset_key(queue, project=None):
-    return scope_message_ids_set(queue, project, MESSAGE_IDS_SUFFIX)
-
-
-def subset_key(queue, project=None):
-    return scope_subscription_ids_set(queue, project, SUBSCRIPTION_IDS_SUFFIX)
-
-
-def raises_conn_error(func):
-    """Handles the Redis ConnectionError.
-
-    This decorator catches Redis's ConnectionError
-    and raises Zaqar's ConnectionError instead.
-    """
-
-    # Note(prashanthr_) : Try to reuse this utility. Violates DRY
-    # Can pass exception type into the decorator and create a
-    # storage level utility.
-
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        try:
-            return func(*args, **kwargs)
-        except redis.exceptions.ConnectionError as ex:
-            LOG.exception(ex)
-            raise errors.ConnectionError()
-
-    return wrapper
-
-
-def retries_on_connection_error(func):
-    """Causes the wrapped function to be re-called on ConnectionError.
-
-    This decorator catches Redis ConnectionError and retries
-    the function call.
-
-    .. Note::
-       Assumes that the decorated function has defined self.driver.redis_conf
-       so that `max_reconnect_attempts` and `reconnect_sleep` can be taken
-       into account.
-
-    .. Warning:: The decorated function must be idempotent.
-    """
-
-    @functools.wraps(func)
-    def wrapper(self, *args, **kwargs):
-        # TODO(prashanthr_) : Try to reuse this utility. Violates DRY
-        # Can pass config parameters into the decorator and create a
-        # storage level utility.
-
-        max_attempts = self.driver.redis_conf.max_reconnect_attempts
-        sleep_sec = self.driver.redis_conf.reconnect_sleep
-
-        for attempt in range(max_attempts):
-            try:
-                return func(self, *args, **kwargs)
-
-            except redis.exceptions.ConnectionError:
-                # NOTE(kgriffs): redis-py will retry once itself,
-                # but if the command cannot be sent the second time after
-                # disconnecting and reconnecting, the error is raised
-                # and we will catch it here.
-                #
-                # NOTE(kgriffs): When using a sentinel, if a master fails
-                # the initial retry will gracefully fail over to the
-                # new master if the sentinel failover delay is low enough;
-                # if the delay is too long, then redis-py will get a
-                # MasterNotFoundError (a subclass of ConnectionError) on
-                # its retry, which will then just get raised and caught
-                # here, in which case we will keep retrying until the
-                # sentinel completes the failover and stops raising
-                # MasterNotFoundError.
-
-                ex = sys.exc_info()[1]
-                LOG.warning(u'Caught ConnectionError, retrying the '
-                            'call to {0}'.format(func))
-
-                time.sleep(sleep_sec * (2 ** attempt))
-        else:
-            LOG.error(u'Caught ConnectionError, maximum attempts '
-                      'to {0} exceeded.'.format(func))
-            raise ex
-
-    return wrapper
-
-
-def msg_claimed_filter(message, now):
-    """Return True IFF the message is currently claimed."""
-
-    return message.claim_id and (now < message.claim_expires)
-
-
-def msg_echo_filter(message, client_uuid):
-    """Return True IFF the specified client posted the message."""
-
-    return message.client_uuid == six.text_type(client_uuid)
-
-
-def msg_expired_filter(message, now):
-    """Return True IFF the message has expired."""
-
-    return message.expires <= now
-
-
-class QueueListCursor(object):
-
-    def __init__(self, client, queues, denormalizer):
-        self.queue_iter = queues
-        self.denormalizer = denormalizer
-        self.client = client
-
-    def __iter__(self):
-        return self
-
-    @raises_conn_error
-    def next(self):
-        curr = next(self.queue_iter)
-        queue = self.client.hmget(curr, ['c', 'm'])
-        return self.denormalizer(queue, encodeutils.safe_decode(curr))
-
-    def __next__(self):
-        return self.next()
-
-
-class SubscriptionListCursor(object):
-
-    def __init__(self, client, subscriptions, denormalizer):
-        self.subscription_iter = subscriptions
-        self.denormalizer = denormalizer
-        self.client = client
-
-    def __iter__(self):
-        return self
-
-    @raises_conn_error
-    def next(self):
-        curr = next(self.subscription_iter)
-        subscription = self.client.hmget(curr, ['s', 'u', 't', 'e', 'o', 'c'])
-        # NOTE(flwang): An expired subscription hash is removed
-        # automatically, but its id in the sorted set is not. Those
-        # stale ids are cleaned up when a new subscription is created,
-        # but they still need to be filtered out here.
-        if not subscription[0]:
-            return self.next()
-        return self.denormalizer(subscription, encodeutils.safe_decode(curr))
-
-    def __next__(self):
-        return self.next()
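The retry decorator above sleeps sleep_sec * (2 ** attempt) between attempts: classic exponential backoff driven by two config options. The same shape, reduced to a self-contained sketch with hypothetical decorator arguments standing in for driver.redis_conf:

```
import functools
import time


def retries_on_error(max_attempts=5, sleep_sec=0.1):
    """Retry the wrapped call with exponential backoff.

    Illustrative only: Zaqar reads these knobs from the driver's
    Redis config rather than from decorator arguments.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_attempts):
                try:
                    return func(*args, **kwargs)
                except ConnectionError:
                    if attempt == max_attempts - 1:
                        raise
                    # Sleep 0.1s, 0.2s, 0.4s, ... between attempts.
                    time.sleep(sleep_sec * (2 ** attempt))
        return wrapper
    return decorator


@retries_on_error(max_attempts=3)
def flaky_call():
    return 'ok'
```

As the docstring above warns, this only makes sense for idempotent callables, since a call may be partially applied before the connection drops.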
diff --git a/zaqar/storage/sqlalchemy/__init__.py b/zaqar/storage/sqlalchemy/__init__.py
deleted file mode 100644
index d28c82d6..00000000
--- a/zaqar/storage/sqlalchemy/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2014 Rackspace Hosting Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from zaqar.storage.sqlalchemy import driver
-
-# Hoist classes into package namespace
-ControlDriver = driver.ControlDriver
diff --git a/zaqar/storage/sqlalchemy/catalogue.py b/zaqar/storage/sqlalchemy/catalogue.py
deleted file mode 100644
index 09bc8f50..00000000
--- a/zaqar/storage/sqlalchemy/catalogue.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) 2014 Rackspace Hosting, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""SQL storage controller for the queues catalogue.
-
-Serves to construct an association between a project + queue -> pool
-
-name: string -> Pools.name
-project: string
-queue: string
-"""
-
-import oslo_db.exception
-import sqlalchemy as sa
-
-from zaqar.storage import base
-from zaqar.storage import errors
-from zaqar.storage.sqlalchemy import tables
-
-
-def _match(project, queue):
-    clauses = [
-        tables.Catalogue.c.project == project,
-        tables.Catalogue.c.queue == queue
-    ]
-    return sa.sql.and_(*clauses)
-
-
-class CatalogueController(base.CatalogueBase):
-
-    def list(self, project):
-        stmt = sa.sql.select([tables.Catalogue]).where(
-            tables.Catalogue.c.project == project
-        )
-        cursor = self.driver.run(stmt)
-        return (_normalize(v) for v in cursor)
-
-    def get(self, project, queue):
-        stmt = sa.sql.select([tables.Catalogue]).where(
-            _match(project, queue)
-        )
-        entry = self.driver.run(stmt).fetchone()
-
-        if entry is None:
-            raise errors.QueueNotMapped(queue, project)
-
-        return _normalize(entry)
-
-    def exists(self, project, queue):
-        try:
-            return self.get(project, queue) is not None
-        except errors.QueueNotMapped:
-            return False
-
-    def insert(self, project, queue, pool):
-        try:
-            stmt = sa.sql.insert(tables.Catalogue).values(
-                project=project, queue=queue, pool=pool
-            )
-            self.driver.run(stmt)
-
-        except oslo_db.exception.DBReferenceError:
-            self._update(project, queue, pool)
-        except oslo_db.exception.DBDuplicateEntry:
-            self._update(project, queue, pool)
-
-    def delete(self, project, queue):
-        stmt = sa.sql.delete(tables.Catalogue).where(
-            _match(project, queue)
-        )
-        self.driver.run(stmt)
-
-    def _update(self, project, queue, pool):
-        stmt = sa.sql.update(tables.Catalogue).where(
-            _match(project, queue)
-        ).values(pool=pool)
-        self.driver.run(stmt)
-
-    def update(self, project, queue, pool=None):
-        if pool is None:
-            return
-
-        if not self.exists(project, queue):
-            raise errors.QueueNotMapped(queue, project)
-
-        self._update(project, queue, pool)
-
-    def drop_all(self):
-        stmt = sa.sql.expression.delete(tables.Catalogue)
-        self.driver.run(stmt)
-
-
-def _normalize(entry):
-    name, project, queue = entry
-    return {
-        'queue': queue,
- 'project': project, - 'pool': name - } diff --git a/zaqar/storage/sqlalchemy/controllers.py b/zaqar/storage/sqlalchemy/controllers.py deleted file mode 100644 index ae873961..00000000 --- a/zaqar/storage/sqlalchemy/controllers.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# Copyright (c) 2014 Rackspace Hosting Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -from zaqar.storage.sqlalchemy import catalogue -from zaqar.storage.sqlalchemy import flavors -from zaqar.storage.sqlalchemy import pools -from zaqar.storage.sqlalchemy import queues - - -QueueController = queues.QueueController -CatalogueController = catalogue.CatalogueController -PoolsController = pools.PoolsController -FlavorsController = flavors.FlavorsController diff --git a/zaqar/storage/sqlalchemy/driver.py b/zaqar/storage/sqlalchemy/driver.py deleted file mode 100644 index 394734e8..00000000 --- a/zaqar/storage/sqlalchemy/driver.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# Copyright 2014 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -from oslo_db.sqlalchemy import engines -from osprofiler import profiler -from osprofiler import sqlalchemy as sa_tracer -import sqlalchemy as sa - -from zaqar.common import decorators -from zaqar import storage -from zaqar.storage.sqlalchemy import controllers -from zaqar.storage.sqlalchemy import options - - -class ControlDriver(storage.ControlDriverBase): - - def __init__(self, conf, cache): - super(ControlDriver, self).__init__(conf, cache) - self.conf.register_opts(options.MANAGEMENT_SQLALCHEMY_OPTIONS, - group=options.MANAGEMENT_SQLALCHEMY_GROUP) - self.sqlalchemy_conf = self.conf[options.MANAGEMENT_SQLALCHEMY_GROUP] - - def _mysql_on_connect(self, conn, record): - # NOTE(flaper87): This is necessary in order - # to ensure that all date operations in mysql - # happen in UTC, `now()` for example. - conn.query('SET time_zone = "+0:00"') - - @decorators.lazy_property(write=False) - def engine(self): - uri = self.sqlalchemy_conf.uri - engine = engines.create_engine(uri, sqlite_fk=True) - - if (uri.startswith('mysql://') or - uri.startswith('mysql+pymysql://')): - # oslo_db.create_engine makes a test connection, throw that out - # first. 
mysql time_zone can be added to oslo_db as a - # startup option - engine.dispose() - sa.event.listen(engine, 'connect', - self._mysql_on_connect) - - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - sa_tracer.add_tracing(sa, engine, "db") - - return engine - - # TODO(cpp-cabrera): expose connect/close as a context manager - # that acquires the connection to the DB for the desired scope and - # closes it once the operations are completed - # TODO(wangxiyuan): we should migrate to oslo.db asap. - def run(self, *args, **kwargs): - return self.engine.execute(*args, **kwargs) - - def close(self): - pass - - @property - def pools_controller(self): - controller = controllers.PoolsController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_management_store): - return profiler.trace_cls("sqlalchemy_pools_" - "controller")(controller) - else: - return controller - - @property - def queue_controller(self): - controller = controllers.QueueController(self) - if (self.conf.profiler.enabled and - (self.conf.profiler.trace_message_store or - self.conf.profiler.trace_management_store)): - return profiler.trace_cls("sqlalchemy_queue_" - "controller")(controller) - else: - return controller - - @property - def catalogue_controller(self): - controller = controllers.CatalogueController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_management_store): - return profiler.trace_cls("sqlalchemy_catalogue_" - "controller")(controller) - else: - return controller - - @property - def flavors_controller(self): - controller = controllers.FlavorsController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_management_store): - return profiler.trace_cls("sqlalchemy_flavors_" - "controller")(controller) - else: - return controller diff --git a/zaqar/storage/sqlalchemy/flavors.py b/zaqar/storage/sqlalchemy/flavors.py deleted file mode 100644 index 4a0030d3..00000000 --- a/zaqar/storage/sqlalchemy/flavors.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -"""flavors: an implementation of the flavor management storage -controller for sqlalchemy. 
- -""" - -import oslo_db.exception -import sqlalchemy as sa - -from zaqar.storage import base -from zaqar.storage import errors -from zaqar.storage.sqlalchemy import tables -from zaqar.storage.sqlalchemy import utils - - -class FlavorsController(base.FlavorsBase): - - def __init__(self, *args, **kwargs): - super(FlavorsController, self).__init__(*args, **kwargs) - self._pools_ctrl = self.driver.pools_controller - - @utils.raises_conn_error - def list(self, project=None, marker=None, limit=10, detailed=False): - marker = marker or '' - - # TODO(cpp-cabrera): optimization - limit the columns returned - # when detailed=False by specifying them in the select() - # clause - stmt = sa.sql.select([tables.Flavors]).where( - sa.and_(tables.Flavors.c.name > marker, - tables.Flavors.c.project == project) - ) - - if limit > 0: - stmt = stmt.limit(limit) - cursor = self.driver.run(stmt) - - marker_name = {} - - def it(): - for cur in cursor: - marker_name['next'] = cur[0] - yield _normalize(cur, detailed=detailed) - - yield it() - yield marker_name and marker_name['next'] - - @utils.raises_conn_error - def get(self, name, project=None, detailed=False): - stmt = sa.sql.select([tables.Flavors]).where( - sa.and_(tables.Flavors.c.name == name, - tables.Flavors.c.project == project) - ) - - flavor = self.driver.run(stmt).fetchone() - if flavor is None: - raise errors.FlavorDoesNotExist(name) - - return _normalize(flavor, detailed) - - @utils.raises_conn_error - def create(self, name, pool_group, project=None, capabilities=None): - cap = None if capabilities is None else utils.json_encode(capabilities) - - try: - stmt = sa.sql.expression.insert(tables.Flavors).values( - name=name, pool_group=pool_group, project=project, - capabilities=cap - ) - self.driver.run(stmt) - except oslo_db.exception.DBDuplicateEntry: - if not self._pools_ctrl.get_pools_by_group(pool_group): - raise errors.PoolGroupDoesNotExist(pool_group) - - # TODO(flaper87): merge update/create into a single - # method with introduction of upsert - self.update(name, pool_group=pool_group, - project=project, - capabilities=cap) - - @utils.raises_conn_error - def exists(self, name, project=None): - stmt = sa.sql.select([tables.Flavors.c.name]).where( - sa.and_(tables.Flavors.c.name == name, - tables.Flavors.c.project == project) - ).limit(1) - return self.driver.run(stmt).fetchone() is not None - - @utils.raises_conn_error - def update(self, name, project=None, pool_group=None, capabilities=None): - fields = {} - - if capabilities is not None: - fields['capabilities'] = capabilities - - if pool_group is not None: - fields['pool_group'] = pool_group - - assert fields, '`pool_group` or `capabilities` not found in kwargs' - if 'capabilities' in fields: - fields['capabilities'] = utils.json_encode(fields['capabilities']) - - stmt = sa.sql.update(tables.Flavors).where( - sa.and_(tables.Flavors.c.name == name, - tables.Flavors.c.project == project)).values(**fields) - - res = self.driver.run(stmt) - if res.rowcount == 0: - raise errors.FlavorDoesNotExist(name) - - @utils.raises_conn_error - def delete(self, name, project=None): - stmt = sa.sql.expression.delete(tables.Flavors).where( - sa.and_(tables.Flavors.c.name == name, - tables.Flavors.c.project == project) - ) - self.driver.run(stmt) - - @utils.raises_conn_error - def drop_all(self): - stmt = sa.sql.expression.delete(tables.Flavors) - self.driver.run(stmt) - - -def _normalize(flavor, detailed=False): - ret = { - 'name': flavor[0], - 'pool_group': flavor[2], - } - - if detailed: - capabilities = 
flavor[3] - ret['capabilities'] = (utils.json_decode(capabilities) - if capabilities else {}) - - return ret diff --git a/zaqar/storage/sqlalchemy/migration/__init__.py b/zaqar/storage/sqlalchemy/migration/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/storage/sqlalchemy/migration/alembic.ini b/zaqar/storage/sqlalchemy/migration/alembic.ini deleted file mode 100644 index 2e8b2f41..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic.ini +++ /dev/null @@ -1,54 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = zaqar/storage/sqlalchemy/migration/alembic_migrations - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# max length of characters to apply to the -# "slug" field -#truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -sqlalchemy.url = - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/README.md b/zaqar/storage/sqlalchemy/migration/alembic_migrations/README.md deleted file mode 100644 index b2c77a15..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/README.md +++ /dev/null @@ -1,73 +0,0 @@ - - -The migrations in `alembic_migrations/versions` contain the changes needed to migrate -between Zaqar database revisions. A migration occurs by executing a script that -details the changes needed to upgrade the database. The migration scripts -are ordered so that multiple scripts can run sequentially. The scripts are executed by -Zaqar's migration wrapper which uses the Alembic library to manage the migration. Zaqar -supports migration from Liberty or later. - -You can upgrade to the latest database version via: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade head -``` - -To check the current database version: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf current -``` - -To create a script to run the migration offline: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade head --sql -``` - -To run the offline migration between specific migration versions: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade : --sql -``` - -Upgrade the database incrementally: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade --delta <# of revs> -``` - -Create new revision: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf revision -m "description of revision" --autogenerate -``` - -Create a blank file: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf revision -m "description of revision" -``` - -This command does not perform any migrations, it only sets the revision. -Revision may be any existing revision. Use this command carefully. 
-``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf stamp -``` - -To verify that the timeline does branch, you can run this command: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf check_migration -``` - -If the migration path does branch, you can find the branch point via: -``` -$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf history -``` diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/env.py b/zaqar/storage/sqlalchemy/migration/alembic_migrations/env.py deleted file mode 100644 index 7a667c26..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/env.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Based on Neutron's migration/cli.py - -from __future__ import with_statement -from logging import config as c - -from alembic import context -from oslo_utils import importutils -from sqlalchemy import create_engine -from sqlalchemy import pool - -from zaqar.storage.sqlalchemy import tables - - -importutils.try_import('zaqar.storage.sqlalchemy.tables') - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config -zaqar_config = config.zaqar_config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -c.fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = tables.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - context.configure( - url=zaqar_config['drivers:management_store:sqlalchemy'].uri) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. 
- - """ - engine = create_engine( - zaqar_config['drivers:management_store:sqlalchemy'].uri, - poolclass=pool.NullPool) - - connection = engine.connect() - context.configure( - connection=connection, - target_metadata=target_metadata) - - try: - with context.begin_transaction(): - context.run_migrations() - finally: - connection.close() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/script.py.mako b/zaqar/storage/sqlalchemy/migration/alembic_migrations/script.py.mako deleted file mode 100644 index f70210dd..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/script.py.mako +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright ${create_date.year} OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - - -def upgrade(): - ${upgrades if upgrades else "pass"} diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/001_liberty.py b/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/001_liberty.py deleted file mode 100644 index 927d226a..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/001_liberty.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2016 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Liberty release - -Revision ID: 001 -Revises: None -Create Date: 2015-09-13 20:46:25.783444 - -""" - -# revision identifiers, used by Alembic. 
-revision = '001' -down_revision = None - -from alembic import op -import sqlalchemy as sa - -MYSQL_ENGINE = 'InnoDB' -MYSQL_CHARSET = 'utf8' - - -def upgrade(): - op.create_table('Queues', - sa.Column('id', sa.INTEGER, primary_key=True), - sa.Column('project', sa.String(64)), - sa.Column('name', sa.String(64)), - sa.Column('metadata', sa.LargeBinary), - sa.UniqueConstraint('project', 'name')) - - op.create_table('PoolGroup', - sa.Column('name', sa.String(64), primary_key=True)) - - op.create_table('Pools', - sa.Column('name', sa.String(64), primary_key=True), - sa.Column('group', sa.String(64), - sa.ForeignKey('PoolGroup.name', - ondelete='CASCADE'), - nullable=True), - sa.Column('uri', sa.String(255), - unique=True, nullable=False), - sa.Column('weight', sa.INTEGER, nullable=False), - sa.Column('options', sa.Text())) - - op.create_table('Flavors', - sa.Column('name', sa.String(64), primary_key=True), - sa.Column('project', sa.String(64)), - sa.Column('pool_group', sa.String(64), - sa.ForeignKey('PoolGroup.name', - ondelete='CASCADE'), - nullable=False), - sa.Column('capabilities', sa.Text())) - - op.create_table('Catalogue', - sa.Column('pool', sa.String(64), - sa.ForeignKey('Pools.name', - ondelete='CASCADE')), - sa.Column('project', sa.String(64)), - sa.Column('queue', sa.String(64), nullable=False), - sa.UniqueConstraint('project', 'queue')) diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/002_placeholder.py b/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/002_placeholder.py deleted file mode 100644 index 12590ce4..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/002_placeholder.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2016 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""placeholder - -Revision ID: 002 -Revises: 001 -Create Date: 2014-04-01 21:04:47.941098 - -""" - -# revision identifiers, used by Alembic. -revision = '002' -down_revision = '001' - - -def upgrade(): - pass diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/003_placeholder.py b/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/003_placeholder.py deleted file mode 100644 index 5bd06bd5..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/003_placeholder.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""placeholder - -Revision ID: 003 -Revises: 002 -Create Date: 2014-04-01 21:05:00.270366 - -""" - -# revision identifiers, used by Alembic. -revision = '003' -down_revision = '002' - - -def upgrade(): - pass diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/004_placeholder.py b/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/004_placeholder.py deleted file mode 100644 index 434dac75..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/004_placeholder.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""placeholder - -Revision ID: 004 -Revises: 003 -Create Date: 2014-04-01 21:04:57.627883 - -""" - -# revision identifiers, used by Alembic. -revision = '004' -down_revision = '003' - - -def upgrade(): - pass diff --git a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/005_placeholder.py b/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/005_placeholder.py deleted file mode 100644 index c8f4f876..00000000 --- a/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/005_placeholder.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""placeholder - -Revision ID: 005 -Revises: 004 -Create Date: 2014-04-01 21:04:54.928605 - -""" - -# revision identifiers, used by Alembic. -revision = '005' -down_revision = '004' - - -def upgrade(): - pass diff --git a/zaqar/storage/sqlalchemy/migration/cli.py b/zaqar/storage/sqlalchemy/migration/cli.py deleted file mode 100644 index 061303ce..00000000 --- a/zaqar/storage/sqlalchemy/migration/cli.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) 2016 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-import os
-
-from alembic import command as alembic_cmd
-from alembic import config as alembic_cfg
-from alembic import util as alembic_u
-from oslo_config import cfg
-
-CONF = cfg.CONF
-
-
-def do_alembic_command(config, cmd, *args, **kwargs):
-    try:
-        getattr(alembic_cmd, cmd)(config, *args, **kwargs)
-    except alembic_u.CommandError as e:
-        alembic_u.err(str(e))
-
-
-def do_check_migration(config, _cmd):
-    do_alembic_command(config, 'branches')
-
-
-def do_upgrade_downgrade(config, cmd):
-    if not CONF.command.revision and not CONF.command.delta:
-        raise SystemExit('You must provide a revision or relative delta')
-
-    revision = CONF.command.revision
-
-    if CONF.command.delta:
-        sign = '+' if CONF.command.name == 'upgrade' else '-'
-        revision = sign + str(CONF.command.delta)
-
-    do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
-
-
-def do_stamp(config, cmd):
-    do_alembic_command(config, cmd,
-                       CONF.command.revision,
-                       sql=CONF.command.sql)
-
-
-def do_revision(config, cmd):
-    do_alembic_command(config, cmd,
-                       message=CONF.command.message,
-                       autogenerate=CONF.command.autogenerate,
-                       sql=CONF.command.sql)
-
-
-def add_command_parsers(subparsers):
-    for name in ['current', 'history', 'branches']:
-        parser = subparsers.add_parser(name)
-        parser.set_defaults(func=do_alembic_command)
-
-    parser = subparsers.add_parser('check_migration')
-    parser.set_defaults(func=do_check_migration)
-
-    for name in ['upgrade', 'downgrade']:
-        parser = subparsers.add_parser(name)
-        parser.add_argument('--delta', type=int)
-        parser.add_argument('--sql', action='store_true')
-        parser.add_argument('revision', nargs='?')
-        parser.set_defaults(func=do_upgrade_downgrade)
-
-    parser = subparsers.add_parser('stamp')
-    parser.add_argument('--sql', action='store_true')
-    parser.add_argument('revision')
-    parser.set_defaults(func=do_stamp)
-
-    parser = subparsers.add_parser('revision')
-    parser.add_argument('-m', '--message')
-    parser.add_argument('--autogenerate', action='store_true')
-    parser.add_argument('--sql', action='store_true')
-    parser.set_defaults(func=do_revision)
-
-
-command_opt = cfg.SubCommandOpt('command',
-                                title='Command',
-                                help='Available commands',
-                                handler=add_command_parsers)
-
-CONF.register_cli_opt(command_opt)
-
-sqlalchemy_opts = [cfg.StrOpt('uri',
-                              help='The SQLAlchemy connection string to'
-                                   ' use to connect to the database.',
-                              secret=True)]
-
-CONF.register_opts(sqlalchemy_opts,
-                   group='drivers:management_store:sqlalchemy')
-
-
-def main():
-    config = alembic_cfg.Config(
-        os.path.join(os.path.dirname(__file__), 'alembic.ini')
-    )
-    config.set_main_option('script_location',
-                           'zaqar.storage.sqlalchemy.'
-                           'migration:alembic_migrations')
-
-    # attach the zaqar conf to the Alembic conf
-    config.zaqar_config = CONF
-
-    CONF(project='zaqar')
-    CONF.command.func(config, CONF.command.name)
diff --git a/zaqar/storage/sqlalchemy/options.py b/zaqar/storage/sqlalchemy/options.py
deleted file mode 100644
index 55603d92..00000000
--- a/zaqar/storage/sqlalchemy/options.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations under -# the License. - -"""SQLAlchemy storage driver implementation.""" - -from oslo_config import cfg - -_deprecated_group = 'drivers:storage:sqlalchemy' -_COMMON_SQLALCHEMY_OPTIONS = ( - cfg.StrOpt('uri', default='sqlite:///:memory:', - deprecated_opts=[cfg.DeprecatedOpt( - 'uri', - group=_deprecated_group), ], - help='An sqlalchemy URL'), -) - -MANAGEMENT_SQLALCHEMY_OPTIONS = _COMMON_SQLALCHEMY_OPTIONS - -MANAGEMENT_SQLALCHEMY_GROUP = 'drivers:management_store:sqlalchemy' - - -def _config_options(): - return [(MANAGEMENT_SQLALCHEMY_GROUP, MANAGEMENT_SQLALCHEMY_OPTIONS)] diff --git a/zaqar/storage/sqlalchemy/pools.py b/zaqar/storage/sqlalchemy/pools.py deleted file mode 100644 index 1156479d..00000000 --- a/zaqar/storage/sqlalchemy/pools.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -"""pools: an implementation of the pool management storage -controller for sqlalchemy. - -""" - -import functools - -import oslo_db.exception -import sqlalchemy as sa - -from zaqar.common import utils as common_utils -from zaqar.storage import base -from zaqar.storage import errors -from zaqar.storage.sqlalchemy import tables -from zaqar.storage.sqlalchemy import utils - - -class PoolsController(base.PoolsBase): - - @utils.raises_conn_error - def _list(self, marker=None, limit=10, detailed=False): - marker = marker or '' - - # TODO(cpp-cabrera): optimization - limit the columns returned - # when detailed=False by specifying them in the select() - # clause - stmt = sa.sql.select([tables.Pools]).where( - tables.Pools.c.name > marker - ) - if limit > 0: - stmt = stmt.limit(limit) - cursor = self.driver.run(stmt) - - marker_name = {} - - def it(): - for cur in cursor: - marker_name['next'] = cur[0] - yield _normalize(cur, detailed=detailed) - - yield it() - yield marker_name and marker_name['next'] - - @utils.raises_conn_error - def _get_pools_by_group(self, group=None, detailed=False): - stmt = sa.sql.select([tables.Pools]).where( - tables.Pools.c.group == group - ) - cursor = self.driver.run(stmt) - - normalizer = functools.partial(_normalize, detailed=detailed) - return (normalizer(v) for v in cursor) - - @utils.raises_conn_error - def _get(self, name, detailed=False): - stmt = sa.sql.select([tables.Pools]).where( - tables.Pools.c.name == name - ) - - pool = self.driver.run(stmt).fetchone() - if pool is None: - raise errors.PoolDoesNotExist(name) - - return _normalize(pool, detailed) - - def _ensure_group_exists(self, name): - try: - stmt = sa.sql.expression.insert(tables.PoolGroup).values(name=name) - self.driver.run(stmt) - return True - except oslo_db.exception.DBDuplicateEntry: - return False - - # TODO(cpp-cabrera): rename to upsert - @utils.raises_conn_error - def _create(self, name, weight, uri, group=None, options=None): - opts = None if options is None else utils.json_encode(options) - - if group is not None: - self._ensure_group_exists(group) - 
- try: - stmt = sa.sql.expression.insert(tables.Pools).values( - name=name, weight=weight, uri=uri, group=group, options=opts - ) - self.driver.run(stmt) - - except oslo_db.exception.DBDuplicateEntry: - # TODO(cpp-cabrera): merge update/create into a single - # method with introduction of upsert - self._update(name, weight=weight, uri=uri, - group=group, options=options) - - @utils.raises_conn_error - def _exists(self, name): - stmt = sa.sql.select([tables.Pools.c.name]).where( - tables.Pools.c.name == name - ).limit(1) - return self.driver.run(stmt).fetchone() is not None - - @utils.raises_conn_error - def _update(self, name, **kwargs): - # NOTE(cpp-cabrera): by pruning None-valued kwargs, we avoid - # overwriting the existing options field with None, since that - # one can be null. - names = ('uri', 'weight', 'group', 'options') - fields = common_utils.fields(kwargs, names, - pred=lambda x: x is not None) - - assert fields, ('`weight`, `uri`, `group`, ' - 'or `options` not found in kwargs') - - if 'options' in fields: - fields['options'] = utils.json_encode(fields['options']) - - if fields.get('group') is not None: - self._ensure_group_exists(fields.get('group')) - - stmt = sa.sql.update(tables.Pools).where( - tables.Pools.c.name == name).values(**fields) - - res = self.driver.run(stmt) - if res.rowcount == 0: - raise errors.PoolDoesNotExist(name) - - @utils.raises_conn_error - def _delete(self, name): - stmt = sa.sql.expression.delete(tables.Pools).where( - tables.Pools.c.name == name - ) - self.driver.run(stmt) - - @utils.raises_conn_error - def _drop_all(self): - stmt = sa.sql.expression.delete(tables.Pools) - self.driver.run(stmt) - stmt = sa.sql.expression.delete(tables.PoolGroup) - self.driver.run(stmt) - - -def _normalize(pool, detailed=False): - ret = { - 'name': pool[0], - 'group': pool[1], - 'uri': pool[2], - 'weight': pool[3], - } - if detailed: - opts = pool[4] - ret['options'] = utils.json_decode(opts) if opts else {} - - return ret diff --git a/zaqar/storage/sqlalchemy/queues.py b/zaqar/storage/sqlalchemy/queues.py deleted file mode 100644 index ea7e962c..00000000 --- a/zaqar/storage/sqlalchemy/queues.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
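The pools controller above, like the flavors controller before it, implements upsert as insert-then-update-on-duplicate: attempt the INSERT, and if the row already exists fall back to an UPDATE. A standalone sketch of that shape, using a hypothetical table and plain SQLAlchemy IntegrityError where the real code catches oslo_db's DBDuplicateEntry wrapper:

```
import sqlalchemy as sa
from sqlalchemy import exc

metadata = sa.MetaData()
pools = sa.Table('pools', metadata,
                 sa.Column('name', sa.String(64), primary_key=True),
                 sa.Column('weight', sa.Integer, nullable=False))

engine = sa.create_engine('sqlite:///:memory:')
metadata.create_all(engine)


def upsert_pool(name, weight):
    # Try the INSERT first; a duplicate primary key means the row is
    # already there, so fall back to an UPDATE of that same row.
    try:
        with engine.begin() as conn:
            conn.execute(pools.insert().values(name=name, weight=weight))
    except exc.IntegrityError:
        with engine.begin() as conn:
            conn.execute(pools.update()
                         .where(pools.c.name == name)
                         .values(weight=weight))


upsert_pool('pool-a', 100)
upsert_pool('pool-a', 50)  # duplicate name, takes the UPDATE path
```

Running each statement in its own transaction keeps the failed INSERT's rollback from poisoning the follow-up UPDATE.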
- -import oslo_db.exception -import sqlalchemy as sa - -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage.sqlalchemy import tables -from zaqar.storage.sqlalchemy import utils - - -class QueueController(storage.Queue): - - def _list(self, project, marker=None, - limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False): - - if project is None: - project = '' - - fields = [tables.Queues.c.name] - if detailed: - fields.append(tables.Queues.c.metadata) - - if marker: - sel = sa.sql.select(fields, sa.and_( - tables.Queues.c.project == project, - tables.Queues.c.name > marker)) - else: - sel = sa.sql.select(fields, tables.Queues.c.project == project) - - sel = sel.order_by(sa.asc(tables.Queues.c.name)).limit(limit) - records = self.driver.run(sel) - - marker_name = {} - - def it(): - for rec in records: - marker_name['next'] = rec[0] - yield ({'name': rec[0]} if not detailed - else - {'name': rec[0], 'metadata': utils.json_decode(rec[1])}) - - yield it() - yield marker_name and marker_name['next'] - - def get_metadata(self, name, project): - if project is None: - project = '' - - sel = sa.sql.select([tables.Queues.c.metadata], sa.and_( - tables.Queues.c.project == project, - tables.Queues.c.name == name - )) - - queue = self.driver.run(sel).fetchone() - if queue is None: - raise errors.QueueDoesNotExist(name, project) - - return utils.json_decode(queue[0]) - - def _get(self, name, project=None): - try: - return self.get_metadata(name, project) - except errors.QueueDoesNotExist: - return {} - - def _create(self, name, metadata=None, project=None): - if project is None: - project = '' - - try: - smeta = utils.json_encode(metadata or {}) - ins = tables.Queues.insert().values(project=project, - name=name, - metadata=smeta) - res = self.driver.run(ins) - except oslo_db.exception.DBDuplicateEntry: - return False - - return res.rowcount == 1 - - def _exists(self, name, project): - if project is None: - project = '' - - sel = sa.sql.select([tables.Queues.c.id], sa.and_( - tables.Queues.c.project == project, - tables.Queues.c.name == name - )) - res = self.driver.run(sel) - r = res.fetchone() - res.close() - return r is not None - - def set_metadata(self, name, metadata, project): - if project is None: - project = '' - - update = (tables.Queues.update(). - where(sa.and_( - tables.Queues.c.project == project, - tables.Queues.c.name == name)). - values(metadata=utils.json_encode(metadata))) - - res = self.driver.run(update) - - try: - if res.rowcount != 1: - raise errors.QueueDoesNotExist(name, project) - finally: - res.close() - - def _delete(self, name, project): - if project is None: - project = '' - - dlt = tables.Queues.delete().where(sa.and_( - tables.Queues.c.project == project, - tables.Queues.c.name == name)) - self.driver.run(dlt) - - def _stats(self, name, project): - pass diff --git a/zaqar/storage/sqlalchemy/tables.py b/zaqar/storage/sqlalchemy/tables.py deleted file mode 100644 index 60d76b9a..00000000 --- a/zaqar/storage/sqlalchemy/tables.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations under -# the License. - -from oslo_utils import timeutils -import sqlalchemy as sa - -metadata = sa.MetaData() - -now = timeutils.utcnow - -Queues = sa.Table('Queues', metadata, - sa.Column('id', sa.INTEGER, primary_key=True), - sa.Column('project', sa.String(64)), - sa.Column('name', sa.String(64)), - sa.Column('metadata', sa.LargeBinary), - sa.UniqueConstraint('project', 'name'), - ) - - -PoolGroup = sa.Table('PoolGroup', metadata, - sa.Column('name', sa.String(64), primary_key=True)) - - -Pools = sa.Table('Pools', metadata, - sa.Column('name', sa.String(64), primary_key=True), - sa.Column('group', sa.ForeignKey('PoolGroup.name', - ondelete='CASCADE'), - nullable=True), - sa.Column('uri', sa.String(255), - unique=True, nullable=False), - sa.Column('weight', sa.INTEGER, nullable=False), - sa.Column('options', sa.Text())) - - -Flavors = sa.Table('Flavors', metadata, - sa.Column('name', sa.String(64), primary_key=True), - sa.Column('project', sa.String(64)), - sa.Column('pool_group', sa.ForeignKey('PoolGroup.name', - ondelete='CASCADE'), - nullable=False), - sa.Column('capabilities', sa.Text())) - - -Catalogue = sa.Table('Catalogue', metadata, - sa.Column('pool', sa.String(64), - sa.ForeignKey('Pools.name', - ondelete='CASCADE')), - sa.Column('project', sa.String(64)), - sa.Column('queue', sa.String(64), nullable=False), - sa.UniqueConstraint('project', 'queue')) diff --git a/zaqar/storage/sqlalchemy/utils.py b/zaqar/storage/sqlalchemy/utils.py deleted file mode 100644 index 614b4eb7..00000000 --- a/zaqar/storage/sqlalchemy/utils.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import functools - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import sqlalchemy as sa -from sqlalchemy import exc -from sqlalchemy.sql import func as sfunc - -from zaqar.storage import errors -from zaqar.storage.sqlalchemy import tables - - -LOG = logging.getLogger(__name__) -UNIX_EPOCH_AS_JULIAN_SEC = 2440587.5 * 86400.0 - - -def raises_conn_error(func): - """Handles sqlalchemy DisconnectionError - - When sqlalchemy detects a disconnect from the database server, it - retries a number of times. After failing that number of times, it - will convert the internal DisconnectionError into an - InvalidRequestError. This decorator handles that error. 
- """ - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except exc.InvalidRequestError as ex: - LOG.exception(ex) - raise errors.ConnectionError() - - return wrapper - - -class NoResult(Exception): - pass - - -def get_qid(driver, queue, project): - sel = sa.sql.select([tables.Queues.c.id], sa.and_( - tables.Queues.c.project == project, - tables.Queues.c.name == queue)) - try: - return driver.get(sel)[0] - except NoResult: - raise errors.QueueDoesNotExist(queue, project) - - -def get_age(created): - return sfunc.now() - created - -# The utilities below make the database IDs opaque to the users -# of Zaqar API. The only purpose is to advise the users NOT to -# make assumptions on the implementation of and/or relationship -# between the message IDs, the markers, and claim IDs. -# -# The magic numbers are arbitrarily picked; the numbers themselves -# come with no special functionalities. - - -def msgid_encode(id): - # NOTE(jeffrey4l): When using mysql-python, the id is converted to - # long type, which will lead to a L letter in the last. - return hex(int(id) ^ 0x5c693a53)[2:] - - -def msgid_decode(id): - try: - return int(id, 16) ^ 0x5c693a53 - - except ValueError: - return None - - -def marker_encode(id): - # NOTE(AAzza): cannot use oct(id) here, because on Python 3 it returns - # string with prefix '0o', whereas on Python 2 prefix is just '0' - return '{0:o}'.format(id ^ 0x3c96a355) - - -def marker_decode(id): - try: - return int(id, 8) ^ 0x3c96a355 - - except ValueError: - return None - - -def cid_encode(id): - return hex(id ^ 0x63c9a59c)[2:] - - -def cid_decode(id): - try: - return int(id, 16) ^ 0x63c9a59c - - except ValueError: - return None - - -def julian_to_unix(julian_sec): - """Converts Julian timestamp, in seconds, to a UNIX timestamp.""" - return int(round(julian_sec - UNIX_EPOCH_AS_JULIAN_SEC)) - - -def stat_message(message): - """Creates a stat document based on a message.""" - return { - 'id': message['id'], - 'age': message['age'], - 'created': message['created'], - } - - -def json_encode(obj): - return encodeutils.safe_encode(jsonutils.dumps(obj), 'utf-8') - - -def json_decode(binary): - return jsonutils.loads(binary, 'utf-8') diff --git a/zaqar/storage/swift/__init__.py b/zaqar/storage/swift/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/storage/swift/claims.py b/zaqar/storage/swift/claims.py deleted file mode 100644 index cef13f19..00000000 --- a/zaqar/storage/swift/claims.py +++ /dev/null @@ -1,194 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import hashlib -import math - -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import swiftclient - -from zaqar.common import decorators -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage.swift import utils - - -class ClaimController(storage.Claim): - """Implements claims resource operations with swift backend - - Claims are scoped by project + queue. 
- """ - def __init__(self, *args, **kwargs): - super(ClaimController, self).__init__(*args, **kwargs) - self._client = self.driver.connection - - @decorators.lazy_property(write=False) - def _queue_ctrl(self): - return self.driver.queue_controller - - def _exists(self, queue, claim_id, project=None): - try: - return self._client.head_object( - utils._claim_container(queue, project), - claim_id) - except swiftclient.ClientException as exc: - if exc.http_status == 404: - raise errors.ClaimDoesNotExist(claim_id, queue, project) - raise - - def _get(self, queue, claim_id, project=None): - try: - container = utils._claim_container(queue, project) - headers, claim = self._client.get_object(container, claim_id) - except swiftclient.ClientException as exc: - if exc.http_status != 404: - raise - return - now = timeutils.utcnow_ts(True) - return { - 'id': claim_id, - 'age': now - float(headers['x-timestamp']), - 'ttl': int(headers['x-delete-at']) - math.floor(now), - } - - def get(self, queue, claim_id, project=None): - message_ctrl = self.driver.message_controller - now = timeutils.utcnow_ts(True) - self._exists(queue, claim_id, project) - - container = utils._claim_container(queue, project) - - headers, claim_obj = self._client.get_object(container, claim_id) - - def g(): - for msg_id in jsonutils.loads(claim_obj): - try: - headers, msg = message_ctrl._find_message(queue, msg_id, - project) - except errors.MessageDoesNotExist: - continue - else: - yield utils._message_to_json(msg_id, msg, headers, now) - - claim_meta = { - 'id': claim_id, - 'age': now - float(headers['x-timestamp']), - 'ttl': int(headers['x-delete-at']) - math.floor(now), - } - - return claim_meta, g() - - def create(self, queue, metadata, project=None, - limit=storage.DEFAULT_MESSAGES_PER_CLAIM): - message_ctrl = self.driver.message_controller - ttl = metadata['ttl'] - grace = metadata['grace'] - msg_ts = ttl + grace - claim_id = uuidutils.generate_uuid() - - messages, marker = message_ctrl._list(queue, project, limit=limit, - include_claimed=False) - - claimed = [] - for msg in messages: - md5 = hashlib.md5() - md5.update( - jsonutils.dumps( - {'body': msg['body'], 'claim_id': None, - 'ttl': msg['ttl']})) - md5 = md5.hexdigest() - msg_ttl = max(msg['ttl'], msg_ts) - content = jsonutils.dumps( - {'body': msg['body'], 'claim_id': claim_id, 'ttl': msg_ttl}) - try: - self._client.put_object( - utils._message_container(queue, project), - msg['id'], - content, - content_type='application/json', - headers={'x-object-meta-clientid': msg['client_uuid'], - 'if-match': md5, - 'x-object-meta-claimid': claim_id, - 'x-delete-after': msg_ttl}) - except swiftclient.ClientException as exc: - if exc.http_status == 412: - continue - raise - else: - msg['claim_id'] = claim_id - msg['ttl'] = msg_ttl - claimed.append(msg) - - utils._put_or_create_container( - self._client, - utils._claim_container(queue, project), - claim_id, - jsonutils.dumps([msg['id'] for msg in claimed]), - content_type='application/json', - headers={'x-delete-after': ttl} - ) - - return claim_id, claimed - - def update(self, queue, claim_id, metadata, project=None): - if not self._queue_ctrl.exists(queue, project): - raise errors.QueueDoesNotExist(queue, project) - - container = utils._claim_container(queue, project) - try: - headers, obj = self._client.get_object(container, claim_id) - except swiftclient.ClientException as exc: - if exc.http_status == 404: - raise errors.ClaimDoesNotExist(claim_id, queue, project) - raise - - self._client.put_object(container, claim_id, obj, - 
content_type='application/json',
-                                headers={'x-delete-after': metadata['ttl']})
-
-    def delete(self, queue, claim_id, project=None):
-        message_ctrl = self.driver.message_controller
-        try:
-            header, obj = self._client.get_object(
-                utils._claim_container(queue, project),
-                claim_id)
-            for msg_id in jsonutils.loads(obj):
-                try:
-                    headers, msg = message_ctrl._find_message(queue, msg_id,
-                                                              project)
-                except errors.MessageDoesNotExist:
-                    continue
-                md5 = hashlib.md5()
-                md5.update(msg)
-                md5 = md5.hexdigest()
-                msg = jsonutils.loads(msg)
-                content = jsonutils.dumps(
-                    {'body': msg['body'], 'claim_id': None, 'ttl': msg['ttl']})
-                client_id = headers['x-object-meta-clientid']
-                self._client.put_object(
-                    utils._message_container(queue, project),
-                    msg_id,
-                    content,
-                    content_type='application/json',
-                    headers={'x-object-meta-clientid': client_id,
-                             'if-match': md5,
-                             'x-delete-at': headers['x-delete-at']})
-
-            self._client.delete_object(
-                utils._claim_container(queue, project),
-                claim_id)
-        except swiftclient.ClientException as exc:
-            if exc.http_status != 404:
-                raise
diff --git a/zaqar/storage/swift/controllers.py b/zaqar/storage/swift/controllers.py
deleted file mode 100644
index 9d6e80b8..00000000
--- a/zaqar/storage/swift/controllers.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from zaqar.storage.swift import claims
-from zaqar.storage.swift import messages
-from zaqar.storage.swift import subscriptions
-
-
-MessageController = messages.MessageController
-ClaimController = claims.ClaimController
-SubscriptionController = subscriptions.SubscriptionController
diff --git a/zaqar/storage/swift/driver.py b/zaqar/storage/swift/driver.py
deleted file mode 100644
index 4b862b14..00000000
--- a/zaqar/storage/swift/driver.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
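# A hedged sketch of the optimistic-concurrency step in ClaimController.create
# above, assuming `client` is an authenticated swiftclient.Connection, the
# message container exists, and the stored object bytes were produced by the
# same json.dumps call (Swift's etag is the md5 of the stored bytes). If a
# concurrent claimer rewrote the message first, Swift answers 412 and the
# message is skipped rather than double-claimed.

import hashlib
import json

import swiftclient

def try_claim(client, container, msg_id, body, ttl, claim_id):
    etag = hashlib.md5(json.dumps(
        {'body': body, 'claim_id': None, 'ttl': ttl}).encode('utf-8'))
    try:
        client.put_object(
            container, msg_id,
            json.dumps({'body': body, 'claim_id': claim_id, 'ttl': ttl}),
            content_type='application/json',
            headers={'if-match': etag.hexdigest(), 'x-delete-after': ttl})
        return True
    except swiftclient.ClientException as exc:
        if exc.http_status == 412:  # lost the race: someone claimed it first
            return False
        raise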
- -from osprofiler import profiler -from six.moves import urllib - -from keystoneauth1.identity import generic -from keystoneauth1 import session as keystone_session -from oslo_log import log as logging -import swiftclient - -from zaqar.common import decorators -from zaqar import storage -from zaqar.storage.swift import controllers -from zaqar.storage.swift import options - -LOG = logging.getLogger(__name__) - - -class DataDriver(storage.DataDriverBase): - - _DRIVER_OPTIONS = options._config_options() - - def __init__(self, conf, cache, control_driver): - super(DataDriver, self).__init__(conf, cache, control_driver) - self.swift_conf = self.conf[options.MESSAGE_SWIFT_GROUP] - - @property - def capabilities(self): - return ( - storage.Capabilities.AOD, - storage.Capabilities.DURABILITY, - ) - - @decorators.lazy_property(write=False) - def connection(self): - return _ClientWrapper(self.swift_conf) - - def is_alive(self): - try: - self.connection.get_capabilities() - return True - except Exception as e: - LOG.exception(e) - return False - - @decorators.lazy_property(write=False) - def message_controller(self): - controller = controllers.MessageController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("swift_message_controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def subscription_controller(self): - controller = controllers.SubscriptionController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("swift_subscription_" - "controller")(controller) - else: - return controller - - @decorators.lazy_property(write=False) - def claim_controller(self): - controller = controllers.ClaimController(self) - if (self.conf.profiler.enabled and - self.conf.profiler.trace_message_store): - return profiler.trace_cls("swift_claim_controller")(controller) - else: - return controller - - def _health(self): - raise NotImplementedError("No health checks") - - def close(self): - pass - - -class _ClientWrapper(object): - """Wrapper around swiftclient.Connection. - - This wraps swiftclient.Connection to give the same API, but provide a - thread-safe alternative with a different object for every method call. It - maintains performance by managing authentication itself, and passing the - token afterwards. - """ - - def __init__(self, conf): - self.conf = conf - self.parsed_url = urllib.parse.urlparse(conf.uri) - self.session = None - - def _init_auth(self): - auth = generic.Password( - username=self.parsed_url.username, - password=self.parsed_url.password, - project_name=self.parsed_url.path[1:], - user_domain_id=self.conf.user_domain_id, - user_domain_name=self.conf.user_domain_name, - project_domain_id=self.conf.project_domain_id, - project_domain_name=self.conf.project_domain_name, - auth_url=self.conf.auth_url) - self.session = keystone_session.Session(auth=auth) - - def __getattr__(self, attr): - if self.session is None: - self._init_auth() - client = swiftclient.Connection(session=self.session, - insecure=self.conf.insecure) - return getattr(client, attr) diff --git a/zaqar/storage/swift/messages.py b/zaqar/storage/swift/messages.py deleted file mode 100644 index 21d3e072..00000000 --- a/zaqar/storage/swift/messages.py +++ /dev/null @@ -1,374 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
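# Self-contained stand-in for the _ClientWrapper pattern above: attribute
# access builds a fresh client per call (so no single, non-thread-safe
# connection is shared), while the expensive step -- authentication -- runs
# once and the session is reused. All names below are illustrative.

class LazyProxy(object):
    def __init__(self, client_factory):
        self._factory = client_factory
        self._session = None

    def __getattr__(self, attr):
        if self._session is None:
            self._session = 'session-token'  # stands in for a keystone session
        return getattr(self._factory(self._session), attr)

class DummyClient(object):
    def __init__(self, session):
        self.session = session

    def get_capabilities(self):
        return {'session': self.session}

proxy = LazyProxy(DummyClient)
print(proxy.get_capabilities())  # fresh DummyClient per call, shared session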
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import functools
-import uuid
-
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-import swiftclient
-
-from zaqar.common import decorators
-from zaqar import storage
-from zaqar.storage import errors
-from zaqar.storage.swift import utils
-
-
-class MessageController(storage.Message):
-    """Implements message resource operations with swift backend
-
-    Messages are scoped by project + queue.
-
-    message -> Swift mapping:
-    +--------------+-----------------------------------------+
-    | Attribute    | Storage location                        |
-    +--------------+-----------------------------------------+
-    | Msg UUID     | Object name                             |
-    +--------------+-----------------------------------------+
-    | Queue Name   | Container name prefix                   |
-    +--------------+-----------------------------------------+
-    | Project name | Container name prefix                   |
-    +--------------+-----------------------------------------+
-    | Created time | Object Creation Time                    |
-    +--------------+-----------------------------------------+
-    | Msg Body     | Object content 'body'                   |
-    +--------------+-----------------------------------------+
-    | Client ID    | Object header 'ClientID'                |
-    +--------------+-----------------------------------------+
-    | Claim ID     | Object content 'claim_id'               |
-    +--------------+-----------------------------------------+
-    | Expires      | Object Delete-After header              |
-    +--------------+-----------------------------------------+
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(MessageController, self).__init__(*args, **kwargs)
-        self._client = self.driver.connection
-
-    @decorators.lazy_property(write=False)
-    def _queue_ctrl(self):
-        return self.driver.queue_controller
-
-    def _delete_queue_messages(self, queue, project, pipe):
-        """Method to remove all the messages belonging to a queue.
-
-        Will be referenced from the QueueController.
-        The pipe to execute deletion will be passed from the QueueController
-        executing the operation.
-        """
-        container = utils._message_container(queue, project)
-        remaining = True
-        key = ''
-        while remaining:
-            headers, objects = self._client.get_container(container,
                                                          limit=1000,
                                                          marker=key)
-            if not objects:
-                return
-            remaining = len(objects) == 1000
-            key = objects[-1]['name']
-            for o in objects:
-                try:
-                    self._client.delete_object(container, o['name'])
-                except swiftclient.ClientException as exc:
-                    if exc.http_status == 404:
-                        continue
-                    raise
-
-    def _list(self, queue, project=None, marker=None,
-              limit=storage.DEFAULT_MESSAGES_PER_PAGE,
-              echo=False, client_uuid=None,
-              include_claimed=False, sort=1):
-        """List messages in the queue, oldest first(ish)
-
-        Time ordering and message inclusion in lists are soft; there is no
-        global order, and times are based on the UTC clock of the zaqar-api
-        server on which the message was created.
-
-        Here be consistency dragons.
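# Sketch of the marker-driven draining loop in _delete_queue_messages above,
# modelled over a sorted list of names instead of a Swift container listing.

def drain(names, page_size=1000):
    key = ''
    while True:
        page = [n for n in names if n > key][:page_size]
        if not page:
            return
        key = page[-1]  # marker: resume after the last name seen
        for name in page:
            print('delete', name)

drain(['msg-1', 'msg-2', 'msg-3'], page_size=2)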
- """ - if not self._queue_ctrl.exists(queue, project): - raise errors.QueueDoesNotExist(queue, project) - - client = self._client - container = utils._message_container(queue, project) - query_string = None - if sort == -1: - query_string = 'reverse=on' - - try: - _, objects = client.get_container( - container, - marker=marker, - # list 2x the objects because some listing items may have - # expired - limit=limit * 2, - query_string=query_string) - except swiftclient.ClientException as exc: - if exc.http_status == 404: - raise errors.QueueDoesNotExist(queue, project) - raise - - def is_claimed(msg, headers): - if include_claimed or msg['claim_id'] is None: - return False - claim_obj = self.driver.claim_controller._get( - queue, msg['claim_id'], project) - return claim_obj is not None and claim_obj['ttl'] > 0 - - def is_echo(msg, headers): - if echo: - return False - return headers['x-object-meta-clientid'] == str(client_uuid) - - filters = [ - is_echo, - is_claimed, - ] - marker = {} - get_object = functools.partial(client.get_object, container) - list_objects = functools.partial(client.get_container, container, - limit=limit * 2, - query_string=query_string) - yield utils._filter_messages(objects, filters, marker, get_object, - list_objects, limit=limit) - yield marker and marker['next'] - - def list(self, queue, project=None, marker=None, - limit=storage.DEFAULT_MESSAGES_PER_PAGE, - echo=False, client_uuid=None, - include_claimed=False): - return self._list(queue, project, marker, limit, echo, - client_uuid, include_claimed) - - def first(self, queue, project=None, sort=1): - if sort not in (1, -1): - raise ValueError(u'sort must be either 1 (ascending) ' - u'or -1 (descending)') - cursor = self._list(queue, project, limit=1, sort=sort) - try: - message = next(next(cursor)) - except StopIteration: - raise errors.QueueIsEmpty(queue, project) - return message - - def get(self, queue, message_id, project=None): - return self._get(queue, message_id, project) - - def _get(self, queue, message_id, project=None, check_queue=True): - if check_queue and not self._queue_ctrl.exists(queue, project): - raise errors.QueueDoesNotExist(queue, project) - - now = timeutils.utcnow_ts(True) - - headers, msg = self._find_message(queue, message_id, project) - return utils._message_to_json(message_id, msg, headers, now) - - def _find_message(self, queue, message_id, project): - try: - return self._client.get_object( - utils._message_container(queue, project), message_id) - - except swiftclient.ClientException as exc: - if exc.http_status == 404: - raise errors.MessageDoesNotExist(message_id, queue, project) - else: - raise - - def bulk_delete(self, queue, message_ids, project=None): - for id in message_ids: - try: - self._delete(queue, id, project) - except errors.MessageDoesNotExist: - pass - - def bulk_get(self, queue, message_ids, project=None): - if not self._queue_ctrl.exists(queue, project): - raise StopIteration() - - for id in message_ids: - try: - yield self._get(queue, id, project, check_queue=False) - except errors.MessageDoesNotExist: - pass - - def post(self, queue, messages, client_uuid, project=None): - # TODO(flwang): It would be nice if we can create a middleware in Swift - # to accept a json list so that Zaqar can create objects in bulk. 
- return [self._create_msg(queue, m, client_uuid, project) - for m in messages] - - def _create_msg(self, queue, msg, client_uuid, project): - slug = str(uuid.uuid1()) - contents = jsonutils.dumps( - {'body': msg.get('body', {}), 'claim_id': None, 'ttl': msg['ttl']}) - try: - self._client.put_object( - utils._message_container(queue, project), - slug, - contents=contents, - content_type='application/json', - headers={ - 'x-object-meta-clientid': str(client_uuid), - 'x-delete-after': msg['ttl']}) - except swiftclient.ClientException as exc: - # NOTE(flwang): To avoid check the queue existence each time when - # posting messages, let's catch the 404 and create the 'queue' - # on demand. - if exc.http_status == 404: - self._client.put_container(utils._message_container(queue, - project)) - self._client.put_object( - utils._message_container(queue, project), - slug, - contents=contents, - content_type='application/json', - headers={ - 'x-object-meta-clientid': str(client_uuid), - 'x-delete-after': msg['ttl']}) - - return slug - - def delete(self, queue, message_id, project=None, claim=None): - claim_ctrl = self.driver.claim_controller - try: - msg = self._get(queue, message_id, project) - except (errors.QueueDoesNotExist, errors.MessageDoesNotExist): - return - if claim is None: - if msg['claim_id']: - claim_obj = claim_ctrl._get(queue, msg['claim_id'], project) - if claim_obj is not None and claim_obj['ttl'] > 0: - raise errors.MessageIsClaimed(message_id) - else: - # Check if the claim does exist - claim_ctrl._exists(queue, claim, project) - if not msg['claim_id']: - raise errors.MessageNotClaimed(message_id) - elif msg['claim_id'] != claim: - raise errors.MessageNotClaimedBy(message_id, claim) - - self._delete(queue, message_id, project) - - def _delete(self, queue, message_id, project=None): - try: - self._client.delete_object( - utils._message_container(queue, project), message_id) - except swiftclient.ClientException as exc: - if exc.http_status != 404: - raise - - def pop(self, queue, limit, project=None): - # Pop is implemented as a chain of the following operations: - # 1. Create a claim. - # 2. Delete the messages claimed. - # 3. Delete the claim. 
- claim_ctrl = self.driver.claim_controller - claim_id, messages = claim_ctrl.create(queue, dict(ttl=1, grace=0), - project, limit=limit) - - message_ids = [message['id'] for message in messages] - self.bulk_delete(queue, message_ids, project) - return messages - - -class MessageQueueHandler(object): - def __init__(self, driver, control_driver): - self.driver = driver - self._client = self.driver.connection - self._queue_ctrl = self.driver.queue_controller - self._message_ctrl = self.driver.message_controller - self._claim_ctrl = self.driver.claim_controller - - def create(self, name, metadata=None, project=None): - self._client.put_container(utils._message_container(name, project)) - - def delete(self, name, project=None): - for container in [utils._message_container(name, project), - utils._claim_container(name, project)]: - try: - headers, objects = self._client.get_container(container) - except swiftclient.ClientException as exc: - if exc.http_status != 404: - raise - else: - for obj in objects: - try: - self._client.delete_object(container, obj['name']) - except swiftclient.ClientException as exc: - if exc.http_status != 404: - raise - try: - self._client.delete_container(container) - except swiftclient.ClientException as exc: - if exc.http_status not in (404, 409): - raise - - def stats(self, name, project=None): - if not self._queue_ctrl.exists(name, project=project): - raise errors.QueueDoesNotExist(name, project) - - total = 0 - claimed = 0 - container = utils._message_container(name, project) - - try: - _, objects = self._client.get_container(container) - except swiftclient.ClientException as exc: - if exc.http_status == 404: - raise errors.QueueIsEmpty(name, project) - - newest = None - oldest = None - now = timeutils.utcnow_ts(True) - for obj in objects: - try: - headers = self._client.head_object(container, obj['name']) - except swiftclient.ClientException as exc: - if exc.http_status != 404: - raise - else: - created = float(headers['x-timestamp']) - newest = { - 'id': obj['name'], - 'age': now - created, - 'created': timeutils.iso8601_from_timestamp(created)} - if oldest is None: - oldest = copy.deepcopy(newest) - total += 1 - if headers.get('x-object-meta-claimid'): - claimed += 1 - - msg_stats = { - 'claimed': claimed, - 'free': total - claimed, - 'total': total, - } - if newest is not None: - msg_stats['newest'] = newest - msg_stats['oldest'] = oldest - - return {'messages': msg_stats} - - def exists(self, queue, project=None): - try: - self._client.head_container(utils._message_container(queue, - project)) - - except swiftclient.ClientException as exc: - if exc.http_status == 404: - return False - raise - else: - return True diff --git a/zaqar/storage/swift/options.py b/zaqar/storage/swift/options.py deleted file mode 100644 index 4b57d063..00000000 --- a/zaqar/storage/swift/options.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
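# How stats() above derives message ages: Swift reports an object's creation
# time in the 'x-timestamp' header as a float UNIX timestamp. A small sketch
# using the same oslo.utils helpers (the 42-second offset is made up).

from oslo_utils import timeutils

now = timeutils.utcnow_ts(True)
created = now - 42.0
print({'age': now - created,
       'created': timeutils.iso8601_from_timestamp(created)})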
- -"""Swift storage driver configuration options.""" - -from oslo_config import cfg -MESSAGE_SWIFT_OPTIONS = ( - cfg.StrOpt("auth_url", default="http://127.0.0.1:5000/v3/", - help="URI of Keystone endpoint to discover Swift"), - cfg.StrOpt("uri", - default="swift://demo:nomoresecrete@/demo", - help="Custom URI describing the swift connection."), - cfg.StrOpt("insecure", default=False, help="Don't check SSL certificate"), - cfg.StrOpt("project_domain_id", default="default", - help="Domain ID containing project"), - cfg.StrOpt("project_domain_name", help="Domain name containing project"), - cfg.StrOpt("user_domain_id", default="default", help="User's domain id"), - cfg.StrOpt("user_domain_name", help="User's domain name"), -) - - -MESSAGE_SWIFT_GROUP = 'drivers:message_store:swift' - - -def _config_options(): - return [(MESSAGE_SWIFT_GROUP, MESSAGE_SWIFT_OPTIONS), ] diff --git a/zaqar/storage/swift/subscriptions.py b/zaqar/storage/swift/subscriptions.py deleted file mode 100644 index 0058fed3..00000000 --- a/zaqar/storage/swift/subscriptions.py +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import functools - -from oslo_serialization import jsonutils -from oslo_utils import uuidutils -import swiftclient -import urllib - -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage.swift import utils - - -class SubscriptionController(storage.Subscription): - """Implements subscription resource operations with swift backend. - - Subscriptions are scoped by queue and project. 
- - subscription -> Swift mapping: - +----------------+---------------------------------------+ - | Attribute | Storage location | - +----------------+---------------------------------------+ - | Sub UUID | Object name | - +----------------+---------------------------------------+ - | Queue Name | Container name prefix | - +----------------+---------------------------------------+ - | Project name | Container name prefix | - +----------------+---------------------------------------+ - | Created time | Object Creation Time | - +----------------+---------------------------------------+ - | Sub options | Object content | - +----------------+---------------------------------------+ - """ - - def __init__(self, *args, **kwargs): - super(SubscriptionController, self).__init__(*args, **kwargs) - self._client = self.driver.connection - - def list(self, queue, project=None, marker=None, - limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE): - container = utils._subscription_container(queue, project) - try: - _, objects = self._client.get_container(container, - limit=limit, - marker=marker) - except swiftclient.ClientException as exc: - if exc.http_status == 404: - objects = [] - else: - raise - marker_next = {} - yield utils.SubscriptionListCursor( - objects, marker_next, - functools.partial(self._client.get_object, container)) - yield marker_next and marker_next['next'] - - def get(self, queue, subscription_id, project=None): - container = utils._subscription_container(queue, project) - try: - headers, data = self._client.get_object(container, subscription_id) - except swiftclient.ClientException as exc: - if exc.http_status == 404: - raise errors.SubscriptionDoesNotExist(subscription_id) - raise - return utils._subscription_to_json(data, headers) - - def create(self, queue, subscriber, ttl, options, project=None): - sub_container = utils._subscriber_container(queue, project) - slug = uuidutils.generate_uuid() - try: - utils._put_or_create_container( - self._client, - sub_container, - urllib.quote_plus(subscriber), - contents=slug, - headers={'x-delete-after': ttl, 'if-none-match': '*'}) - except swiftclient.ClientException as exc: - if exc.http_status == 412: - return - raise - container = utils._subscription_container(queue, project) - data = {'id': slug, - 'source': queue, - 'subscriber': subscriber, - 'options': options, - 'ttl': ttl, - 'confirmed': False} - utils._put_or_create_container( - self._client, container, slug, contents=jsonutils.dumps(data), - content_type='application/json', headers={'x-delete-after': ttl}) - return slug - - def update(self, queue, subscription_id, project=None, **kwargs): - container = utils._subscription_container(queue, project) - data = self.get(queue, subscription_id, project) - data.pop('age') - ttl = data['ttl'] - if 'subscriber' in kwargs: - sub_container = utils._subscriber_container(queue, project) - try: - self._client.put_object( - sub_container, - urllib.quote_plus(kwargs['subscriber']), - contents=subscription_id, - headers={'x-delete-after': ttl, 'if-none-match': '*'}) - except swiftclient.ClientException as exc: - if exc.http_status == 412: - raise errors.SubscriptionAlreadyExists() - raise - self._client.delete_object(sub_container, - urllib.quote_plus(data['subscriber'])) - data.update(kwargs) - self._client.put_object(container, - subscription_id, - contents=jsonutils.dumps(data), - content_type='application/json', - headers={'x-delete-after': ttl}) - - def exists(self, queue, subscription_id, project=None): - container = 
utils._subscription_container(queue, project) - return self._client.head_object(container, subscription_id) - - def delete(self, queue, subscription_id, project=None): - try: - data = self.get(queue, subscription_id, project) - except errors.SubscriptionDoesNotExist: - return - sub_container = utils._subscriber_container(queue, project) - try: - self._client.delete_object(sub_container, - urllib.quote_plus(data['subscriber'])) - except swiftclient.ClientException as exc: - if exc.http_status != 404: - raise - container = utils._subscription_container(queue, project) - try: - self._client.delete_object(container, subscription_id) - except swiftclient.ClientException as exc: - if exc.http_status != 404: - raise - - def get_with_subscriber(self, queue, subscriber, project=None): - sub_container = utils._subscriber_container(queue, project) - headers, obj = self._client.get_object(sub_container, - urllib.quote_plus(subscriber)) - return self.get(queue, obj, project) - - def confirm(self, queue, subscription_id, project=None, confirmed=True): - self.update(queue, subscription_id, project, confirmed=True) diff --git a/zaqar/storage/swift/utils.py b/zaqar/storage/swift/utils.py deleted file mode 100644 index ea503223..00000000 --- a/zaqar/storage/swift/utils.py +++ /dev/null @@ -1,144 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import swiftclient - - -def _message_container(queue, project=None): - return "zaqar_message:%s:%s" % (queue, project) - - -def _claim_container(queue=None, project=None): - return "zaqar_claim:%s:%s" % (queue, project) - - -def _subscription_container(queue, project=None): - return "zaqar_subscription:%s:%s" % (queue, project) - - -def _subscriber_container(queue, project=None): - return "zaqar_subscriber:%s:%s" % (queue, project) - - -def _put_or_create_container(client, *args, **kwargs): - """PUT a swift object to a container that may not exist - - Takes the exact arguments of swiftclient.put_object but will - autocreate a container that doesn't exist - """ - try: - client.put_object(*args, **kwargs) - except swiftclient.ClientException as e: - if e.http_status == 404: - client.put_container(args[0]) - client.put_object(*args, **kwargs) - else: - raise - - -def _message_to_json(message_id, msg, headers, now): - msg = jsonutils.loads(msg) - - return { - 'id': message_id, - 'age': now - float(headers['x-timestamp']), - 'ttl': msg['ttl'], - 'body': msg['body'], - 'claim_id': msg['claim_id'] - } - - -def _subscription_to_json(sub, headers): - sub = jsonutils.loads(sub) - now = timeutils.utcnow_ts(True) - return {'id': sub['id'], - 'age': now - float(headers['x-timestamp']), - 'source': sub['source'], - 'subscriber': sub['subscriber'], - 'ttl': sub['ttl'], - 'options': sub['options'], - 'confirmed': sub['confirmed']} - - -def _filter_messages(messages, filters, marker, get_object, list_objects, - limit): - """Create a filtering iterator over a list of messages. 
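# The flat naming scheme above, spelled out (queue and project names are
# examples): each resource type gets its own Swift container per
# (queue, project) pair, so listing a queue's messages is a plain container
# listing. Note that a missing project literally becomes the string 'None'.

def _message_container(queue, project=None):
    return "zaqar_message:%s:%s" % (queue, project)

print(_message_container('orders', 'acme'))  # zaqar_message:orders:acme
print(_message_container('orders'))          # zaqar_message:orders:None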
-
-    The function accepts a list of filters; a message is included in the
-    reply only if none of the filters rejects it.
-    """
-    now = timeutils.utcnow_ts(True)
-
-    for msg in messages:
-        if msg is None:
-            continue
-
-        marker['next'] = msg['name']
-        try:
-            headers, obj = get_object(msg['name'])
-        except swiftclient.ClientException as exc:
-            if exc.http_status == 404:
-                continue
-            raise
-        obj = jsonutils.loads(obj)
-        for should_skip in filters:
-            if should_skip(obj, headers):
-                break
-        else:
-            limit -= 1
-            yield {
-                'id': marker['next'],
-                'ttl': obj['ttl'],
-                'client_uuid': headers['x-object-meta-clientid'],
-                'body': obj['body'],
-                'age': now - float(headers['x-timestamp']),
-                'claim_id': obj['claim_id'],
-            }
-            if limit <= 0:
-                break
-    if limit > 0 and marker:
-        # We haven't reached the limit, let's try to get some more messages
-        _, objects = list_objects(marker=marker['next'])
-        if not objects:
-            return
-        for msg in _filter_messages(objects, filters, marker, get_object,
                                    list_objects, limit):
-            yield msg
-
-
-class SubscriptionListCursor(object):
-
-    def __init__(self, objects, marker_next, get_object):
-        self.objects = iter(objects)
-        self.marker_next = marker_next
-        self.get_object = get_object
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        while True:
-            curr = next(self.objects)
-            self.marker_next['next'] = curr['name']
-            try:
-                headers, sub = self.get_object(curr['name'])
-            except swiftclient.ClientException as exc:
-                if exc.http_status == 404:
-                    continue
-                raise
-            return _subscription_to_json(sub, headers)
-
-    def __next__(self):
-        return self.next()
diff --git a/zaqar/storage/utils.py b/zaqar/storage/utils.py
deleted file mode 100644
index 4a9a4a65..00000000
--- a/zaqar/storage/utils.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import copy
-
-from oslo_config import cfg
-from oslo_log import log
-from osprofiler import profiler
-import six
-from stevedore import driver
-
-from zaqar.common import errors
-from zaqar.common import utils
-from zaqar.storage import configuration
-
-LOG = log.getLogger(__name__)
-
-
-def dynamic_conf(uri, options, conf=None):
-    """Given metadata, returns a dynamic configuration.
-
-    :param uri: pool location
-    :type uri: six.text_type
-    :param options: additional pool metadata
-    :type options: dict
-    :param conf: Optional conf object to copy
-    :type conf: `oslo_config.cfg.ConfigOpts`
-    :returns: Configuration object suitable for constructing storage
-              drivers
-    :rtype: oslo_config.cfg.ConfigOpts
-    """
-    storage_type = six.moves.urllib_parse.urlparse(uri).scheme
-
-    # NOTE(cpp-cabrera): parse storage-specific opts:
-    # 'drivers:message_store:{type}'
-    options['uri'] = uri
-    storage_opts = utils.dict_to_conf(options)
-    storage_group = u'drivers:message_store:%s' % storage_type
-
-    # NOTE(cpp-cabrera): register those options!
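# Mirroring the registration step that follows, on a fresh ConfigOpts:
# per-pool options land under 'drivers:message_store:<scheme>' and are then
# overridden from the pool's metadata. The URI value below is illustrative.

from oslo_config import cfg

conf = cfg.ConfigOpts()
group = 'drivers:message_store:mongodb'
conf.register_opts([cfg.StrOpt('uri')], group=group)
conf.set_override('uri', 'mongodb://127.0.0.1:27017', group=group)
print(conf[group].uri)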
- if conf is None: - conf = cfg.ConfigOpts() - else: - conf_wrap = configuration.Configuration(conf) - conf = copy.copy(conf_wrap) - - if storage_group not in conf: - conf.register_opts(storage_opts, group=storage_group) - - if 'drivers' not in conf: - # NOTE(cpp-cabrera): parse general opts: 'drivers' - driver_opts = utils.dict_to_conf({'message_store': storage_type}) - conf.register_opts(driver_opts, group=u'drivers') - - conf.set_override('message_store', storage_type, 'drivers') - - for opt in options: - if opt in conf[storage_group]: - conf.set_override(opt, options[opt], group=storage_group) - return conf - - -def load_storage_impl(uri, control_mode=False, default_store=None): - """Loads a storage driver implementation and returns it. - - :param uri: The connection uri to parse and load a driver for. - :param control_mode: (Default False). Determines which - driver type to load; if False, the data driver is - loaded. If True, the control driver is loaded. - :param default_store: The default store to load if no scheme - is parsed. - """ - - mode = 'control' if control_mode else 'data' - driver_type = 'zaqar.{0}.storage'.format(mode) - storage_type = six.moves.urllib_parse.urlparse(uri).scheme or default_store - - try: - mgr = driver.DriverManager(driver_type, storage_type, - invoke_on_load=False) - - return mgr.driver - - except Exception as exc: - LOG.exception(exc) - raise errors.InvalidDriver(exc) - - -def load_storage_driver(conf, cache, storage_type=None, - control_mode=False, control_driver=None): - """Loads a storage driver and returns it. - - The driver's initializer will be passed conf and cache as - its positional args. - - :param conf: Configuration instance to use for loading the - driver. Must include a 'drivers' group. - :param cache: Cache instance that the driver can (optionally) - use to reduce latency for some operations. - :param storage_type: The storage_type to load. If None, then - the `drivers` option will be used. - :param control_mode: (Default False). Determines which - driver type to load; if False, the data driver is - loaded. If True, the control driver is loaded. - :param control_driver: (Default None). The control driver - instance to pass to the storage driver. Needed to access - the queue controller, mainly. - """ - if control_mode: - mode = 'control' - storage_type = storage_type or conf['drivers'].management_store - else: - mode = 'data' - storage_type = storage_type or conf['drivers'].message_store - - driver_type = 'zaqar.{0}.storage'.format(mode) - - _invoke_args = [conf, cache] - if control_driver is not None: - _invoke_args.append(control_driver) - - try: - mgr = driver.DriverManager(driver_type, - storage_type, - invoke_on_load=True, - invoke_args=_invoke_args) - - if conf.profiler.enabled: - if ((mode == "control" and conf.profiler.trace_management_store) or - (mode == "data" and conf.profiler.trace_message_store)): - trace_name = '{0}_{1}_driver'.format(storage_type, mode) - return profiler.trace_cls(trace_name, - trace_private=True)(mgr.driver) - else: - return mgr.driver - - except Exception as exc: - LOG.error('Failed to load "{}" driver for "{}"'.format( - driver_type, storage_type)) - LOG.exception(exc) - raise errors.InvalidDriver(exc) - - -def keyify(key, iterable): - """Make an iterator from an iterable of dicts compared with a key. 
- - :param key: A key exists for all dict inside the iterable object - :param iterable: The input iterable object - """ - - class Keyed(object): - def __init__(self, obj): - self.obj = obj - - def __eq__(self, other): - return self.obj[key] == other.obj[key] - - def __ne__(self, other): - return self.obj[key] != other.obj[key] - - def __lt__(self, other): - return self.obj[key] < other.obj[key] - - def __le__(self, other): - return self.obj[key] <= other.obj[key] - - def __gt__(self, other): - return self.obj[key] > other.obj[key] - - def __ge__(self, other): - return self.obj[key] >= other.obj[key] - - for item in iterable: - yield Keyed(item) - - -def can_connect(uri, conf=None): - """Given a URI, verifies whether it's possible to connect to it. - - :param uri: connection string to a storage endpoint - :type uri: six.text_type - :returns: True if can connect else False - :rtype: bool - """ - # NOTE(cabrera): create a mock configuration containing only - # the URI field. This should be sufficient to initialize a - # storage driver. - conf = dynamic_conf(uri, {}, conf=conf) - storage_type = six.moves.urllib_parse.urlparse(uri).scheme - - try: - ctrl = load_storage_driver(conf, None, - storage_type=conf.drivers.management_store, - control_mode=True) - driver = load_storage_driver(conf, None, - storage_type=storage_type, - control_driver=ctrl) - return driver.is_alive() - except Exception as exc: - LOG.debug('Can\'t connect to: %s \n%s', (uri, exc)) - return False diff --git a/zaqar/tests/__init__.py b/zaqar/tests/__init__.py deleted file mode 100644 index 4f7d7180..00000000 --- a/zaqar/tests/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Zaqar Unit-ish Tests""" - -from zaqar.tests import base -from zaqar.tests import helpers - - -SKIP_SLOW_TESTS = helpers.SKIP_SLOW_TESTS -RUN_SLOW_TESTS = not SKIP_SLOW_TESTS - -expect = helpers.expect -is_slow = helpers.is_slow -requires_mongodb = helpers.requires_mongodb -requires_redis = helpers.requires_redis -requires_swift = helpers.requires_swift -TestBase = base.TestBase diff --git a/zaqar/tests/base.py b/zaqar/tests/base.py deleted file mode 100644 index e6b632df..00000000 --- a/zaqar/tests/base.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
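# Illustrative use of keyify above (an assumption about its intent, since
# the call sites live elsewhere in the tree): the Keyed wrappers compare by
# the chosen key, which lets heapq.merge interleave already-sorted listings.

import heapq

a = [{'k': 1}, {'k': 3}]
b = [{'k': 2}]
merged = heapq.merge(keyify('k', a), keyify('k', b))  # keyify as defined above
print([item.obj['k'] for item in merged])             # [1, 2, 3]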
- -import os - -import fixtures -from oslo_config import cfg -from oslo_log import log -from osprofiler import opts -import testtools - -from zaqar.common import configs -from zaqar.tests import helpers - - -class TestBase(testtools.TestCase): - """Child class of testtools.TestCase for testing Zaqar. - - Inherit from this and write your test methods. If the child class defines - a prepare(self) method, this method will be called before executing each - test method. - """ - - config_file = None - - def setUp(self): - super(TestBase, self).setUp() - - self.useFixture(fixtures.FakeLogger('zaqar')) - - if os.environ.get('OS_STDOUT_CAPTURE') is not None: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') is not None: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - if self.config_file: - self.config_file = helpers.override_mongo_conf( - self.config_file, self) - self.conf = self.load_conf(self.config_file) - else: - self.conf = cfg.ConfigOpts() - - self.conf.register_opts(configs._GENERAL_OPTIONS) - self.conf.register_opts(configs._DRIVER_OPTIONS, - group=configs._DRIVER_GROUP) - self.conf.register_opts(configs._NOTIFICATION_OPTIONS, - group=configs._NOTIFICATION_GROUP) - self.conf.register_opts(configs._NOTIFICATION_OPTIONS, - group=configs._NOTIFICATION_GROUP) - self.conf.register_opts(configs._SIGNED_URL_OPTIONS, - group=configs._SIGNED_URL_GROUP) - opts.set_defaults(self.conf) - self.conf.register_opts(configs._PROFILER_OPTIONS, - group=configs._PROFILER_GROUP) - - self.mongodb_url = os.environ.get('ZAQAR_TEST_MONGODB_URL', - 'mongodb://127.0.0.1:27017') - - @classmethod - def conf_path(cls, filename): - """Returns the full path to the specified Zaqar conf file. - - :param filename: Name of the conf file to find (e.g., - 'wsgi_memory.conf') - """ - - if os.path.exists(filename): - return filename - - return os.path.join(os.environ["ZAQAR_TESTS_CONFIGS_DIR"], filename) - - @classmethod - def load_conf(cls, filename): - """Loads `filename` configuration file. - - :param filename: Name of the conf file to find (e.g., - 'wsgi_memory.conf') - - :returns: Project's config object. - """ - conf = cfg.ConfigOpts() - log.register_options(conf) - conf(args=[], default_config_files=[cls.conf_path(filename)]) - return conf - - def config(self, group=None, **kw): - """Override some configuration values. - - The keyword arguments are the names of configuration options to - override and their values. - - If a group argument is supplied, the overrides are applied to - the specified configuration option group. - - All overrides are automatically cleared at the end of the current - test by the tearDown() method. 
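# A hypothetical child test case showing how the helpers above are meant to
# be used (the test class and values are made up for illustration; it assumes
# the test config directory is resolvable as in conf_path above):

class ExampleTest(TestBase):
    config_file = 'wsgi_mongodb.conf'

    def test_store_override(self):
        self.config(group='drivers', message_store='mongodb')
        self.assertEqual('mongodb', self.conf.drivers.message_store)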
- """ - for k, v in kw.items(): - self.conf.set_override(k, v, group) - - def _my_dir(self): - return os.path.abspath(os.path.dirname(__file__)) diff --git a/zaqar/tests/etc/drivers_storage_invalid.conf b/zaqar/tests/etc/drivers_storage_invalid.conf deleted file mode 100644 index df0dbfcc..00000000 --- a/zaqar/tests/etc/drivers_storage_invalid.conf +++ /dev/null @@ -1,12 +0,0 @@ -[DEFAULT] -debug = False -verbose = False -admin_mode = False -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = invalid - -[drivers:transport:wsgi] -port = 8888 diff --git a/zaqar/tests/etc/drivers_transport_invalid.conf b/zaqar/tests/etc/drivers_transport_invalid.conf deleted file mode 100644 index 907683d1..00000000 --- a/zaqar/tests/etc/drivers_transport_invalid.conf +++ /dev/null @@ -1,11 +0,0 @@ -[DEFAULT] -debug = False -verbose = False -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = invalid -message_store = sqlalchemy - -[drivers:transport:wsgi] -port = 8888 diff --git a/zaqar/tests/etc/functional-tests.conf b/zaqar/tests/etc/functional-tests.conf deleted file mode 100644 index 4b47c68b..00000000 --- a/zaqar/tests/etc/functional-tests.conf +++ /dev/null @@ -1,12 +0,0 @@ -[DEFAULT] -# run_tests = True -unreliable = True -enable_deprecated_api_versions = 1,1.1 - -[zaqar] -# url = http://0.0.0.0:8888 -# config = functional-zaqar.conf - -[headers] -# useragent = FunctionalTests -# project_id = 123456 diff --git a/zaqar/tests/etc/functional-zaqar.conf b/zaqar/tests/etc/functional-zaqar.conf deleted file mode 100644 index 011c7dcb..00000000 --- a/zaqar/tests/etc/functional-zaqar.conf +++ /dev/null @@ -1,58 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = True - -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -enable_deprecated_api_versions = 1,1.1 - -# Log to this file! -; log_file = /var/log/zaqar/server.log - -;auth_strategy = - -# ================= Syslog Options ============================ - -# Send logs to syslog (/dev/log) instead of to file specified -# by `log_file` -;use_syslog = False - -# Facility to use. If unset defaults to LOG_USER. -;syslog_log_facility = LOG_LOCAL0 - -unreliable = True -enable_deprecated_api_versions = 1, 1.1 - -[drivers] -# Transport driver module (e.g., wsgi, zmq) -transport = wsgi -# Storage driver module (e.g., mongodb, sqlalchemy) -message_store = mongodb - -[drivers:transport:wsgi] -bind = 127.0.0.1 -port = 8888 - -;[drivers:transport:zmq] -;port = 9999 - -[limits:transport] -# The maximum number of queue records per page when listing queues -;max_queues_per_page = 20 - -# Maximum number of messages per page when listing messages. -;max_messages_per_page = 20 - -# Maximum number of messages that can be claimed or popped at a time. -;max_messages_per_claim_or_pop = 20 - -# Expiration limits; the minimal values are all 60 (seconds) -;max_message_ttl = 1209600 -;max_claim_ttl = 43200 -;max_claim_grace = 43200 - -# Maximum size in bytes allowed for queue metadata and bulk/single -# message post bodies (including whitespace and envelope fields). 
-;max_queue_metadata = 65536 -;max_messages_post_size = 262144 diff --git a/zaqar/tests/etc/keystone_auth.conf b/zaqar/tests/etc/keystone_auth.conf deleted file mode 100644 index 78763bbf..00000000 --- a/zaqar/tests/etc/keystone_auth.conf +++ /dev/null @@ -1,14 +0,0 @@ -[DEFAULT] -auth_strategy = keystone - -debug = False -verbose = False -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb - -[drivers:transport:wsgi] -bind = 0.0.0.0:8888 -workers = 20 diff --git a/zaqar/tests/etc/policy.json b/zaqar/tests/etc/policy.json deleted file mode 100644 index 83a6bd5d..00000000 --- a/zaqar/tests/etc/policy.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "context_is_admin": "role:admin", - "admin_or_owner": "is_admin:True or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - - "queues:get_all": "", - "queues:create": "", - "queues:get": "", - "queues:delete": "", - "queues:update": "", - "queues:stats": "", - "queues:share": "", - "queues:purge": "", - - "messages:get_all": "", - "messages:create": "", - "messages:get": "", - "messages:delete": "", - "messages:delete_all": "", - - "claims:get_all": "", - "claims:create": "", - "claims:get": "", - "claims:delete": "", - "claims:update": "", - - "subscription:get_all": "", - "subscription:create": "", - "subscription:get": "", - "subscription:delete": "", - "subscription:update": "", - "subscription:confirm": "", - - "pools:get_all": "rule:context_is_admin", - "pools:create": "rule:context_is_admin", - "pools:get": "rule:context_is_admin", - "pools:delete": "rule:context_is_admin", - "pools:update": "rule:context_is_admin", - - "flavors:get_all": "", - "flavors:create": "rule:context_is_admin", - "flavors:get": "", - "flavors:delete": "rule:context_is_admin", - "flavors:update": "rule:context_is_admin", - - "ping:get": "", - "health:get": "rule:context_is_admin" -} diff --git a/zaqar/tests/etc/websocket_mongodb.conf b/zaqar/tests/etc/websocket_mongodb.conf deleted file mode 100644 index c8bbcd5c..00000000 --- a/zaqar/tests/etc/websocket_mongodb.conf +++ /dev/null @@ -1,21 +0,0 @@ -[DEFAULT] -unreliable = True -enable_deprecated_api_versions = 1,1.1 - -[drivers] - -# Transport driver to use (string value) -transport = websocket - -# Storage driver to use (string value) -message_store = mongodb - -[drivers:management_store:mongodb] - -# Mongodb Connection URI -uri = mongodb://127.0.0.1:27017 - -[drivers:message_store:mongodb] - -# Mongodb Connection URI -uri = mongodb://127.0.0.1:27017 \ No newline at end of file diff --git a/zaqar/tests/etc/websocket_mongodb_keystone_auth.conf b/zaqar/tests/etc/websocket_mongodb_keystone_auth.conf deleted file mode 100644 index f544ee91..00000000 --- a/zaqar/tests/etc/websocket_mongodb_keystone_auth.conf +++ /dev/null @@ -1,21 +0,0 @@ -[DEFAULT] -auth_strategy = keystone -enable_deprecated_api_versions = 1,1.1 - -[drivers] - -# Transport driver to use (string value) -transport = websocket - -# Storage driver to use (string value) -message_store = mongodb - -[drivers:management_store:mongodb] - -# Mongodb Connection URI -uri = mongodb://127.0.0.1:27017 - -[drivers:message_store:mongodb] - -# Mongodb Connection URI -uri = mongodb://127.0.0.1:27017 diff --git a/zaqar/tests/etc/websocket_mongodb_subscriptions.conf b/zaqar/tests/etc/websocket_mongodb_subscriptions.conf deleted file mode 100644 index 4bdd36fd..00000000 --- a/zaqar/tests/etc/websocket_mongodb_subscriptions.conf +++ /dev/null @@ -1,24 +0,0 @@ -[DEFAULT] -unreliable = True -enable_deprecated_api_versions = 
1,1.1 - -[drivers] - -# Transport driver to use (string value) -transport = websocket - -# Storage driver to use (string value) -message_store = mongodb - -[drivers:management_store:mongodb] - -# Mongodb Connection URI -uri = mongodb://127.0.0.1:27017 - -[drivers:message_store:mongodb] - -# Mongodb Connection URI -uri = mongodb://127.0.0.1:27017 - -[storage] -message_pipeline = zaqar.notification.notifier \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_faulty.conf b/zaqar/tests/etc/wsgi_faulty.conf deleted file mode 100644 index d3b426c2..00000000 --- a/zaqar/tests/etc/wsgi_faulty.conf +++ /dev/null @@ -1,12 +0,0 @@ -[DEFAULT] -debug = False -verbose = False -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = faulty -management_store = faulty - -[drivers:transport:wsgi] -port = 8888 diff --git a/zaqar/tests/etc/wsgi_fifo_mongodb.conf b/zaqar/tests/etc/wsgi_fifo_mongodb.conf deleted file mode 100644 index e4912744..00000000 --- a/zaqar/tests/etc/wsgi_fifo_mongodb.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -debug = False -verbose = False -unreliable = True -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb - -[drivers:transport:wsgi] -port = 8888 - -[drivers:message_store:mongodb] -uri = mongodb.fifo://127.0.0.1:27017 -database = message_zaqar_test_fifo -max_reconnect_attempts = 3 -reconnect_sleep = 0.001 - -# NOTE(kgriffs): Reduce from the default of 1000 to reduce the -# duration of related tests -max_attempts = 5 diff --git a/zaqar/tests/etc/wsgi_mongodb.conf b/zaqar/tests/etc/wsgi_mongodb.conf deleted file mode 100644 index f89ba18d..00000000 --- a/zaqar/tests/etc/wsgi_mongodb.conf +++ /dev/null @@ -1,25 +0,0 @@ -[DEFAULT] -debug = False -verbose = False -unreliable = True -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb - -[drivers:transport:wsgi] -port = 8888 - -[drivers:message_store:mongodb] -uri = mongodb://127.0.0.1:27017 -database = message_zaqar_test -max_reconnect_attempts = 3 -reconnect_sleep = 0.001 - -# NOTE(kgriffs): Reduce from the default of 1000 to reduce the -# duration of related tests -max_attempts = 5 - -[signed_url] -secret_key = test \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_mongodb_default_limits.conf b/zaqar/tests/etc/wsgi_mongodb_default_limits.conf deleted file mode 100644 index c4888e7e..00000000 --- a/zaqar/tests/etc/wsgi_mongodb_default_limits.conf +++ /dev/null @@ -1,6 +0,0 @@ -[DEFAULT] -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_mongodb_pooled.conf b/zaqar/tests/etc/wsgi_mongodb_pooled.conf deleted file mode 100644 index cae62c0b..00000000 --- a/zaqar/tests/etc/wsgi_mongodb_pooled.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -pooling = True -admin_mode = True -unreliable = True -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb - -[drivers:message_store:mongodb] -uri = mongodb://127.0.0.1:27017 -database = zaqar_test_pooled - -[drivers:management_store:mongodb] -uri = mongodb://127.0.0.1:27017 -database = zaqar_test - -[pooling:catalog] -enable_virtual_pool = True \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_mongodb_pooled_disable_virtual_pool.conf b/zaqar/tests/etc/wsgi_mongodb_pooled_disable_virtual_pool.conf deleted file mode 100644 index 94aa9363..00000000 --- 
a/zaqar/tests/etc/wsgi_mongodb_pooled_disable_virtual_pool.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -pooling = True -admin_mode = True -unreliable = True -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb - -[drivers:message_store:mongodb] -uri = mongodb://127.0.0.1:27017 -database = zaqar_test_pooled - -[drivers:management_store:mongodb] -uri = mongodb://127.0.0.1:27017 -database = zaqar_test - -[pooling:catalog] -enable_virtual_pool = False \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_mongodb_validation.conf b/zaqar/tests/etc/wsgi_mongodb_validation.conf deleted file mode 100644 index 4edd9681..00000000 --- a/zaqar/tests/etc/wsgi_mongodb_validation.conf +++ /dev/null @@ -1,13 +0,0 @@ -[DEFAULT] -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb - -# Test support for deprecated options -[limits:transport] -metadata_size_uplimit = 64 - -[transport] -max_messages_post_size = 256 diff --git a/zaqar/tests/etc/wsgi_redis.conf b/zaqar/tests/etc/wsgi_redis.conf deleted file mode 100644 index f7b87b52..00000000 --- a/zaqar/tests/etc/wsgi_redis.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -debug = False -verbose = False -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = redis - -[drivers:transport:wsgi] -port = 8888 - -[drivers:message_store:redis] -uri = redis://127.0.0.1:6379 - -# NOTE(kgriffs): Reduce from the default of 10 to reduce the -# duration of related tests -max_reconnect_attempts = 3 - -# NOTE(kgriffs): Reduce from the default of 1 to reduce the -# duration of related tests -reconnect_sleep = 0.1 \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_redis_pooled.conf b/zaqar/tests/etc/wsgi_redis_pooled.conf deleted file mode 100644 index 2bfd1061..00000000 --- a/zaqar/tests/etc/wsgi_redis_pooled.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -pooling = True -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = redis - -[drivers:message_store:redis] -uri = redis://127.0.0.1:6379 -max_reconnect_attempts = 3 -reconnect_sleep = 1 - -[drivers:management_store:redis] -uri = redis://127.0.0.1:6379 -max_reconnect_attempts = 3 -reconnect_sleep = 1 - -[pooling:catalog] -enable_virtual_pool = True \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_sqlalchemy.conf b/zaqar/tests/etc/wsgi_sqlalchemy.conf deleted file mode 100644 index 444f3042..00000000 --- a/zaqar/tests/etc/wsgi_sqlalchemy.conf +++ /dev/null @@ -1,15 +0,0 @@ -[DEFAULT] -debug = False -verbose = False -admin_mode = False -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = mongodb -management_store = sqlalchemy - -[drivers:transport:wsgi] -bind = 0.0.0.0 -port = 8888 -workers = 20 diff --git a/zaqar/tests/etc/wsgi_sqlalchemy_pooled.conf b/zaqar/tests/etc/wsgi_sqlalchemy_pooled.conf deleted file mode 100644 index 7b547326..00000000 --- a/zaqar/tests/etc/wsgi_sqlalchemy_pooled.conf +++ /dev/null @@ -1,16 +0,0 @@ -[DEFAULT] -pooling = True -admin_mode = True -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -message_store = sqlalchemy - -[drivers:transport:wsgi] -bind = 0.0.0.0 -port = 8888 -workers = 20 - -[pooling:catalog] -enable_virtual_pool = True \ No newline at end of file diff --git a/zaqar/tests/etc/wsgi_swift.conf b/zaqar/tests/etc/wsgi_swift.conf deleted file mode 100644 index 56e69bc0..00000000 --- a/zaqar/tests/etc/wsgi_swift.conf +++ /dev/null @@ -1,12 +0,0 @@ 
-[DEFAULT] -debug = False -verbose = False -enable_deprecated_api_versions = 1,1.1 - -[drivers] -transport = wsgi -management_store = sqlalchemy -message_store = swift - -[drivers:transport:wsgi] -port = 8888 diff --git a/zaqar/tests/faulty_storage.py b/zaqar/tests/faulty_storage.py deleted file mode 100644 index e09131b8..00000000 --- a/zaqar/tests/faulty_storage.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from zaqar import storage - -_CONFIG_GROUP = 'drivers:message_store:faulty' - - -class DataDriver(storage.DataDriverBase): - - _DRIVER_OPTIONS = [(_CONFIG_GROUP, - [cfg.StrOpt('uri', default='faulty://')])] - - def __init__(self, conf, cache, control_driver): - super(DataDriver, self).__init__(conf, cache, control_driver) - - def close(self): - pass - - @property - def default_options(self): - return {} - - @property - def capabilities(self): - raise NotImplementedError() - - def is_alive(self): - raise NotImplementedError() - - def _health(self): - raise NotImplementedError() - - @property - def queue_controller(self): - return self.control_driver.queue_controller - - @property - def message_controller(self): - return MessageController(self) - - @property - def claim_controller(self): - return None - - @property - def subscription_controller(self): - return None - - -class ControlDriver(storage.ControlDriverBase): - - def __init__(self, conf, cache): - super(ControlDriver, self).__init__(conf, cache) - - def close(self): - pass - - @property - def queue_controller(self): - return QueueController(self) - - @property - def catalogue_controller(self): - return None - - @property - def pools_controller(self): - return None - - @property - def flavors_controller(self): - return None - - -class QueueController(storage.Queue): - def __init__(self, driver): - pass - - def _list(self, project=None): - raise NotImplementedError() - - def _get(self, name, project=None): - raise NotImplementedError() - - def get_metadata(self, name, project=None): - raise NotImplementedError() - - def _create(self, name, metadata=None, project=None): - raise NotImplementedError() - - def _exists(self, name, project=None): - raise NotImplementedError() - - def set_metadata(self, name, metadata, project=None): - raise NotImplementedError() - - def _delete(self, name, project=None): - raise NotImplementedError() - - def _stats(self, name, project=None): - raise NotImplementedError() - - -class MessageController(storage.Message): - def __init__(self, driver): - pass - - def first(self, queue_name, project=None, sort=1): - raise NotImplementedError() - - def get(self, queue, message_id, project=None): - raise NotImplementedError() - - def bulk_get(self, queue, message_ids, project=None): - raise NotImplementedError() - - def list(self, queue, project=None, marker=None, - limit=None, echo=False, client_uuid=None): - raise NotImplementedError() - - def post(self, queue, messages, project=None): - raise 
NotImplementedError() - - def pop(self, queue, pop_limit, project=None): - raise NotImplementedError() - - def delete(self, queue, message_id, project=None, claim=None): - raise NotImplementedError() - - def bulk_delete(self, queue, message_ids, project=None): - raise NotImplementedError() diff --git a/zaqar/tests/functional/__init__.py b/zaqar/tests/functional/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/functional/base.py b/zaqar/tests/functional/base.py deleted file mode 100644 index a9fc4305..00000000 --- a/zaqar/tests/functional/base.py +++ /dev/null @@ -1,409 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc -import multiprocessing -import os - -import jsonschema -from oslo_utils import timeutils -import six - -from zaqar.api.v1 import response as response_v1 -from zaqar.api.v1_1 import response as response_v1_1 -from zaqar.api.v2 import response as response_v2 -from zaqar import bootstrap -from zaqar.storage import mongodb -from zaqar.storage.redis import driver as redis -from zaqar import tests as testing -from zaqar.tests.functional import config -from zaqar.tests.functional import helpers -from zaqar.tests.functional import http -from zaqar.tests import helpers as base_helpers -from zaqar.transport import base as transport_base -# TODO(flaper87): This is necessary to register -# wsgi configs and won't be permanent. It'll be -# refactored as part of the work for this blueprint -from zaqar.transport import validation -from zaqar.transport import wsgi # noqa - -# TODO(kgriffs): Run functional tests in a devstack gate job and -# set this using an environment variable or something. -# -# TODO(kgriffs): Find a more general way to do this; we seem to be -# using this environ flag pattern over and over again. -_TEST_INTEGRATION = os.environ.get('ZAQAR_TEST_INTEGRATION') is not None - - -class FunctionalTestBase(testing.TestBase): - - server = None - server_class = None - config_file = None - class_bootstrap = None - # NOTE(Eva-i): ttl_gc_interval is the known maximum time interval between - # automatic resource TTL expirations. Depends on message store back end. - class_ttl_gc_interval = None - wipe_dbs_projects = set([]) - - def setUp(self): - super(FunctionalTestBase, self).setUp() - # NOTE(flaper87): Config can't be a class - # attribute because it may be necessary to - # modify it at runtime which will affect - # other running instances.
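Stepping back to faulty_storage above: every controller method raises NotImplementedError so that transport-level error handling can be exercised against a store that always fails (wsgi_faulty.conf wires it in as message_store = faulty). A minimal sketch of that pattern, assuming a driver instance has already been constructed elsewhere:

    def poke_faulty(driver):
        # Any storage call on the faulty driver raises; the transport layer
        # is expected to translate such failures into HTTP-level errors.
        try:
            driver.is_alive()
        except NotImplementedError:
            return 'storage failure propagated as expected'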
- self.cfg = config.load_config() - - if not self.cfg.run_tests: - self.skipTest("Functional tests disabled") - - config_file = self.config_file or self.cfg.zaqar.config - - config_file = base_helpers.override_mongo_conf(config_file, self) - - self.mconf = self.load_conf(config_file) - - validator = validation.Validator(self.mconf) - self.limits = validator._limits_conf - - transport_base._config_options() - - self.resource_defaults = transport_base.ResourceDefaults(self.mconf) - - # Always register options - self.__class__.class_bootstrap = bootstrap.Bootstrap(self.mconf) - self.class_bootstrap.transport - - datadriver = self.class_bootstrap.storage._storage - if isinstance(datadriver, redis.DataDriver): - self.__class__.class_ttl_gc_interval = 1 - if isinstance(datadriver, mongodb.DataDriver): - # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute - self.__class__.class_ttl_gc_interval = 60 - - if _TEST_INTEGRATION: - if not (self.server and self.server.is_alive()): - self.server = self.server_class() - self.server.start(self.mconf) - self.addCleanup(self.server.process.terminate) - - self.client = http.Client() - else: - if self.server_class == ZaqarAdminServer: - self.mconf.pooling = True - self.mconf.admin_mode = True - - self.addCleanup(self.class_bootstrap.storage.close) - self.addCleanup(self.class_bootstrap.control.close) - self.client = http.WSGIClient(self.class_bootstrap.transport.app) - - self.headers = helpers.create_zaqar_headers(self.cfg) - - self.headers_response_with_body = {'location', 'content-type'} - - self.client.set_headers(self.headers) - - # Store information required for cleaning databases after - # execution of test class - self.wipe_dbs_projects.add(self.headers["X-Project-ID"]) - - def tearDown(self): - super(FunctionalTestBase, self).tearDown() - # Project might have changed during test case execution. - # Let's add it again to the set. - self.wipe_dbs_projects.add(self.headers["X-Project-ID"]) - - @staticmethod - def _if_mongo_datadriver_drop_dbs(driver): - """Drops MongoDB datadriver's databases. - - :param driver: instance of zaqar.storage.mongodb.driver.DataDriver - """ - if not isinstance(driver, mongodb.DataDriver): - return - for db in driver.message_databases: - driver.connection.drop_database(db) - subscription_db = driver.subscriptions_database - driver.connection.drop_database(subscription_db) - - @staticmethod - def _if_mongo_controldriver_drop_dbs(driver): - """Drops all MongoDB controldriver's databases. - - :param driver: instance of zaqar.storage.mongodb.driver.ControlDriver - """ - if not isinstance(driver, mongodb.ControlDriver): - return - driver.connection.drop_database(driver.database) - driver.connection.drop_database(driver.queues_database) - - @classmethod - def _pooling_drop_dbs_by_project(cls, xproject): - """Finds all pool drivers by project, drops all their databases. - - Assumes that pooling is enabled.
- - :param xproject: project name to use for pool drivers search - """ - datadriver = cls.class_bootstrap.storage._storage - controldriver = cls.class_bootstrap.control - # Let's get list of all queues by project - queue_generator = controldriver.queue_controller.list(project=xproject) - queues = list(next(queue_generator)) - # Let's extract all queue names from the list of queues - queue_names = [q['name'] for q in queues] - # Finally let's use queue names to get each one of pool datadrivers - catalog = datadriver._pool_catalog - for queue_name in queue_names: - pool_pipe_driver = catalog.lookup(queue_name, project=xproject) - pool_datadriver = pool_pipe_driver._storage - if pool_datadriver is not None: - # Let's delete the queue, so the next invocation of - # pooling_catalog.lookup() will not recreate the pool driver - controldriver.queue_controller.delete(queue_name) - # Let's drop pool's databases - cls._if_mongo_datadriver_drop_dbs(pool_datadriver) - - @classmethod - def tearDownClass(cls): - """Cleans up after test class execution. - - Drops all remaining databases. - Closes connections to databases. - """ - # Bootstrap can be None if all test cases were skipped, so nothing to - # clean - if cls.class_bootstrap is None: - return - - datadriver = cls.class_bootstrap.storage._storage - controldriver = cls.class_bootstrap.control - - if cls.class_bootstrap.conf.pooling: - # Pooling detected, let's drop pooling-specific databases - for p in cls.wipe_dbs_projects: - # This will find all pool databases by project and drop them - cls._pooling_drop_dbs_by_project(p) - controldriver.pools_controller.drop_all() - controldriver.flavors_controller.drop_all() - else: - # No pooling detected, let's just drop datadriver's databases - cls._if_mongo_datadriver_drop_dbs(datadriver) - - cls.class_bootstrap.storage.close() - - # Let's drop controldriver's databases - controldriver.catalogue_controller.drop_all() - cls._if_mongo_controldriver_drop_dbs(controldriver) - - controldriver.close() - - def assertIsSubset(self, required_values, actual_values): - """Checks that required_values is a subset of actual_values. - - :param required_values: the expected subset. - :param actual_values: the full set to check against. - """ - - form = 'Missing Header(s) - {0}' - self.assertTrue(required_values.issubset(actual_values), - msg=form.format((required_values - actual_values))) - - def assertMessageCount(self, actualCount, expectedCount): - """Checks that the number of messages returned is <= the limit. - - :param expectedCount: limit value passed in the url, or the default (10). - :param actualCount: number of messages returned in the API response. - """ - msg = ('More Messages returned than allowed: expected count = {0}' - ', actual count = {1}'.format(expectedCount, actualCount)) - self.assertLessEqual(actualCount, expectedCount, msg) - - def assertQueueStats(self, result_json, claimed): - """Checks the Queue Stats results - - :param result_json: json response returned for Queue Stats. - :param claimed: expected number of claimed messages.
- """ - total = self.limits.max_messages_per_claim_or_pop - free = total - claimed - - self.assertEqual(claimed, result_json['messages']['claimed']) - self.assertEqual(free, result_json['messages']['free']) - self.assertEqual(total, result_json['messages']['total']) - - if 'oldest' in result_json['messages']: - oldest_message = result_json['messages']['oldest'] - self.verify_message_stats(oldest_message) - - newest_message = result_json['messages']['newest'] - self.verify_message_stats(newest_message) - - def assertSchema(self, response, expectedSchemaName): - """Compares the json response with the expected schema - - :param response: response json returned by the API. - :type response: dict - :param expectedSchema: expected schema definition for response. - :type expectedSchema: string - """ - try: - expectedSchema = self.response.get_schema(expectedSchemaName) - jsonschema.validate(response, expectedSchema) - except jsonschema.ValidationError as message: - assert False, message - - def verify_message_stats(self, message): - """Verifies the oldest & newest message stats - - :param message: oldest (or) newest message returned by - queue_name/stats. - """ - expected_keys = ['age', 'created', 'href'] - - response_keys = message.keys() - response_keys = sorted(response_keys) - self.assertEqual(expected_keys, response_keys) - - # Verify that age has valid values - age = message['age'] - msg = 'Invalid Age {0}'.format(age) - self.assertLessEqual(0, age, msg) - self.assertLessEqual(age, self.limits.max_message_ttl, msg) - - # Verify that GET on href returns 200 - path = message['href'] - result = self.client.get(path) - self.assertEqual(200, result.status_code) - - # Verify that created time falls within the last 10 minutes - # NOTE(malini): The messages are created during the test. - created_time = message['created'] - created_time = timeutils.normalize_time( - timeutils.parse_isotime(created_time)) - now = timeutils.utcnow() - - delta = timeutils.delta_seconds(before=created_time, after=now) - # NOTE(malini): The 'int()' below is a work around for the small time - # difference between julianday & UTC. - # (needed to pass this test on sqlite driver) - delta = int(delta) - - msg = ('Invalid Time Delta {0}, Created time {1}, Now {2}' - .format(delta, created_time, now)) - self.assertLessEqual(0, delta, msg) - self.assertLessEqual(delta, 6000, msg) - - -@six.add_metaclass(abc.ABCMeta) -class Server(object): - - name = "zaqar-functional-test-server" - - def __init__(self): - self.process = None - - @abc.abstractmethod - def get_target(self, conf): - """Prepares the target object - - This method is meant to initialize server's - bootstrap and return a callable to run the - server. - - :param conf: The config instance for the - bootstrap class - :returns: A callable object - """ - - def is_alive(self): - """Returns True IFF the server is running.""" - - if self.process is None: - return False - - return self.process.is_alive() - - def start(self, conf): - """Starts the server process. - - :param conf: The config instance to use for - the new process - :returns: A `multiprocessing.Process` instance - """ - - # TODO(flaper87): Re-use running instances. - target = self.get_target(conf) - - if not callable(target): - raise RuntimeError("Target not callable") - - self.process = multiprocessing.Process(target=target, - name=self.name) - self.process.daemon = True - self.process.start() - - # NOTE(flaper87): Give it a second - # to boot. 
- self.process.join(1) - return self.process - - def stop(self): - """Terminates a process - - This method kills a process by - calling `terminate`. Note that - children of this process won't be - terminated but become orphaned. - """ - self.process.terminate() - - -class ZaqarServer(Server): - - name = "zaqar-wsgiref-test-server" - - def get_target(self, conf): - server = bootstrap.Bootstrap(conf) - return server.run - - -class ZaqarAdminServer(Server): - - name = "zaqar-admin-wsgiref-test-server" - - def get_target(self, conf): - conf.admin_mode = True - server = bootstrap.Bootstrap(conf) - return server.run - - -class V1FunctionalTestBase(FunctionalTestBase): - def setUp(self): - super(V1FunctionalTestBase, self).setUp() - self.response = response_v1.ResponseSchema(self.limits) - - -class V1_1FunctionalTestBase(FunctionalTestBase): - def setUp(self): - super(V1_1FunctionalTestBase, self).setUp() - self.response = response_v1_1.ResponseSchema(self.limits) - - -class V2FunctionalTestBase(FunctionalTestBase): - def setUp(self): - super(V2FunctionalTestBase, self).setUp() - self.response = response_v2.ResponseSchema(self.limits) diff --git a/zaqar/tests/functional/config.py b/zaqar/tests/functional/config.py deleted file mode 100644 index db32bec0..00000000 --- a/zaqar/tests/functional/config.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from oslo_config import cfg -from oslo_log import log - - -_DEFAULT = ( - cfg.BoolOpt("run_tests", default=True), -) - - -_ZAQAR_OPTIONS = ( - cfg.StrOpt("url", default="http://127.0.0.1:8888"), - cfg.StrOpt("config", default="functional-zaqar.conf"), -) - - -_HEADERS_OPTIONS = ( - cfg.StrOpt("user_agent", default="FunctionalTests"), - cfg.StrOpt("project_id", default="123456"), -) - - -def load_config(): - conf = cfg.ConfigOpts() - conf.register_opts(_DEFAULT) - conf.register_opts(_ZAQAR_OPTIONS, group="zaqar") - conf.register_opts(_HEADERS_OPTIONS, group="headers") - - log.register_options(conf) - - conf_path = os.path.join(os.environ["ZAQAR_TESTS_CONFIGS_DIR"], - "functional-tests.conf") - conf(args=[], default_config_files=[conf_path]) - return conf diff --git a/zaqar/tests/functional/helpers.py b/zaqar/tests/functional/helpers.py deleted file mode 100644 index b357d9f3..00000000 --- a/zaqar/tests/functional/helpers.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
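The config module above yields a namespace with three option groups; a short sketch of consuming it, assuming ZAQAR_TESTS_CONFIGS_DIR points at the etc directory shown earlier (the path below is a placeholder):

    import os

    os.environ.setdefault('ZAQAR_TESTS_CONFIGS_DIR', '/path/to/zaqar/tests/etc')

    from zaqar.tests.functional import config

    conf = config.load_config()
    if conf.run_tests:
        print(conf.zaqar.url, conf.zaqar.config)
        print(conf.headers.user_agent, conf.headers.project_id)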
- -import random -import string -import uuid - - -def create_zaqar_headers(conf): - """Returns headers to be used for all Zaqar requests.""" - - headers = { - "User-Agent": conf.headers.user_agent, - "Accept": "application/json", - "X-Project-ID": conf.headers.project_id, - "Client-ID": str(uuid.uuid1()), - } - - return headers - - -def generate_dict(dict_length): - """Returns dictionary of specified length. Key:Value is random data. - - :param dict_length: length of the dictionary - """ - return dict([(generate_random_string(), generate_random_string()) - for _ in range(dict_length)]) - - -def generate_random_string(length=10): - """Returns an ASCII string of specified length.""" - chars = string.ascii_letters + string.digits - return ''.join(random.choice(chars) for i in range(length)) - - -def single_message_body(messagesize=2, default_ttl=False, ttl=None): - """Returns message body for one message. - - :param messagesize: Size of the message body to generate (default 2) - :param default_ttl: Set to True to not set an explicit TTL value in - the message request, in which case the server will use a default - value (default False). Note that default TTL is only supported in - v1.1 of the API. - :param ttl: Number of seconds to provide as the TTL for each - message. If not specified, a random value is chosen in the - range: (60 <= TTL <= 1209600). If `default_ttl` is True, the - `ttl` param is ignored. - """ - - message_body = {} - message_body['body'] = generate_dict(messagesize) - - if not default_ttl: - if ttl is not None: - message_body['ttl'] = ttl - else: - message_body['ttl'] = random.randint(60, 1209600) - - return message_body - - -def create_message_body(messagecount, **kwargs): - """Returns request body for message-posting tests. - - :param messagecount: Number of messages to create - :param **kwargs: Same as for `single_message_body` - """ - - return [single_message_body(**kwargs) - for i in range(messagecount)] - - -def create_message_body_v1_1(messagecount, **kwargs): - """Returns request body for message-posting tests. - - :param messagecount: Number of messages to create - :param **kwargs: Same as for `single_message_body` - """ - - return { - "messages": [single_message_body(**kwargs) - for i in range(messagecount)] - } - - -def create_pool_body(**kwargs): - pool_body = { - 'weight': kwargs['weight'], - 'uri': kwargs['uri'], - 'options': { - 'max_retry_sleep': 1, - 'partitions': 8 - } - } - - return pool_body - - -def create_subscription_body(subscriber='http://fake:8080', ttl=600, - options_key='funny', options_value='no'): - options = {options_key: options_value} - return {'subscriber': subscriber, 'options': options, 'ttl': ttl} diff --git a/zaqar/tests/functional/http.py b/zaqar/tests/functional/http.py deleted file mode 100644 index 9bff7574..00000000 --- a/zaqar/tests/functional/http.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
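A quick sketch of the request-body helpers above in use (counts and TTLs arbitrary):

    from zaqar.tests.functional import helpers

    # v1 body: a bare list of {'body': ..., 'ttl': ...} dicts
    doc_v1 = helpers.create_message_body(messagecount=3, ttl=300)

    # v1.1 body: the same list wrapped in a {'messages': [...]} envelope
    doc_v1_1 = helpers.create_message_body_v1_1(messagecount=3, ttl=300)
    assert doc_v1_1['messages'][0]['ttl'] == 300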
- -import functools -import json - -from falcon import testing as ftest -from oslo_serialization import jsonutils -import requests -import six - - -def _build_url(method): - - @functools.wraps(method) - def wrapper(self, url='', **kwargs): - - if not url.startswith("http"): - if not self.base_url: - raise RuntimeError("Base url not set") - - url = self.base_url + url or '' - - return method(self, url, **kwargs) - - return wrapper - - -class Client(object): - - def __init__(self): - # NOTE(kgriffs): used by @_build_url - self.base_url = None - self.session = requests.session() - - def set_base_url(self, base_url): - self.base_url = base_url - - def set_headers(self, headers): - self.session.headers.update(headers) - - @_build_url - def get(self, url=None, **kwargs): - """Does http GET.""" - return self.session.get(url, **kwargs) - - @_build_url - def head(self, url=None, **kwargs): - """Does http HEAD.""" - return self.session.head(url, **kwargs) - - @_build_url - def post(self, url=None, **kwargs): - """Does http POST.""" - - if "data" in kwargs: - kwargs['data'] = json.dumps(kwargs["data"]) - - return self.session.post(url, **kwargs) - - @_build_url - def put(self, url=None, **kwargs): - """Does http PUT.""" - - if "data" in kwargs: - kwargs['data'] = json.dumps(kwargs["data"]) - - return self.session.put(url, **kwargs) - - @_build_url - def delete(self, url=None, **kwargs): - """Does http DELETE.""" - return self.session.delete(url, **kwargs) - - @_build_url - def patch(self, url=None, **kwargs): - """Does http PATCH.""" - if "data" in kwargs: - kwargs['data'] = json.dumps(kwargs["data"]) - return self.session.patch(url, **kwargs) - - -class ResponseMock(object): - """Mocks part of the Requests library's Response object.""" - - def __init__(self, srmock, wsgi_result): - self.status_code = int(srmock.status.partition(' ')[0]) - self._body = wsgi_result[0] if wsgi_result else '' - self.headers = srmock.headers_dict - - def json(self): - return jsonutils.loads(self._body) - - -class WSGIClient(object): - """Same interface as Client, but speaks directly to a WSGI callable.""" - - def __init__(self, app): - # NOTE(kgriffs): used by @_build_url - self.base_url = None - - self.app = app - self.headers = {} - - @staticmethod - def _sanitize_headers(headers): - # NOTE(kgriffs): Workaround for a little create_environ bug - return dict([(key, '' if value is None else value) - for key, value in headers.items()]) - - def _simulate_request(self, url, method='GET', data=None, - headers=None, params=None): - """Simulate a request. - - Simulates a WSGI request to the API for testing. - - :param url: Request path for the desired resource - :param method: (Default 'GET') The HTTP method to send - :param data: (Default None) A dict that will be serialized - to JSON and submitted as the body of the request. May - also be a pre-serialized string. - :param headers: (Default None) A dict containing - extra HTTP headers to send. - :param params: (Default None) A dict of parameters - to use in the query string for the request. 
- - :returns: a requests response instance - """ - - if headers is None: - headers = self.headers - - headers = self._sanitize_headers(headers) - - if data is None: - body = '' - elif isinstance(data, str) or isinstance(data, six.text_type): - body = data - else: - body = json.dumps(data, ensure_ascii=False) - - parsed_url = six.moves.urllib_parse.urlparse(url) - - query = parsed_url.query - - if params is not None: - extra = '&'.join([key + '=' + str(value) - for key, value in params.items()]) - - query += '&' + extra - - environ = ftest.create_environ(method=method, - path=parsed_url.path, - query_string=query, - headers=headers, - body=body) - - srmock = ftest.StartResponseMock() - wsgi_result = self.app(environ, srmock) - - return ResponseMock(srmock, wsgi_result) - - def set_base_url(self, base_url): - self.base_url = base_url - - def set_headers(self, headers): - self.headers.update(headers) - - @_build_url - def get(self, url=None, **kwargs): - """Simulate a GET request.""" - kwargs['method'] = 'GET' - return self._simulate_request(url=url, **kwargs) - - @_build_url - def head(self, url=None, **kwargs): - """Simulate a HEAD request.""" - kwargs['method'] = 'HEAD' - return self._simulate_request(url=url, **kwargs) - - @_build_url - def post(self, url=None, **kwargs): - """Simulate a POST request.""" - kwargs['method'] = 'POST' - return self._simulate_request(url=url, **kwargs) - - @_build_url - def put(self, url=None, **kwargs): - """Simulate a PUT request.""" - kwargs['method'] = 'PUT' - return self._simulate_request(url=url, **kwargs) - - @_build_url - def delete(self, url=None, **kwargs): - """Simulate a DELETE request.""" - kwargs['method'] = 'DELETE' - return self._simulate_request(url=url, **kwargs) - - @_build_url - def patch(self, url=None, **kwargs): - """Simulate a PATCH request.""" - kwargs['method'] = 'PATCH' - return self._simulate_request(url=url, **kwargs) diff --git a/zaqar/tests/functional/websocket/__init__.py b/zaqar/tests/functional/websocket/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/functional/websocket/test_queues.py b/zaqar/tests/functional/websocket/test_queues.py deleted file mode 100644 index 5bb17c2e..00000000 --- a/zaqar/tests/functional/websocket/test_queues.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
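To make the two client flavors above concrete, a usage sketch (the app object and the address are assumed; WSGIClient only uses the path portion of the URL):

    from zaqar.tests.functional import http

    client = http.WSGIClient(app)  # app: a WSGI callable, e.g. the bootstrap's transport.app
    client.set_base_url('http://localhost:8888')
    client.set_headers({'Client-ID': 'some-uuid', 'X-Project-ID': '123456'})

    resp = client.get('/v1/queues/demo/stats')
    print(resp.status_code, resp.json())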
- -import json -from oslo_utils import uuidutils - -from testtools import testcase -import websocket - -from zaqar.tests.functional import base - - -class TestQueues(base.V1_1FunctionalTestBase): - - config_file = 'websocket_mongodb.conf' - server_class = base.ZaqarServer - - def setUp(self): - if not base._TEST_INTEGRATION: - raise testcase.TestSkipped('Only run in integration mode') - super(TestQueues, self).setUp() - self.project_id = uuidutils.generate_uuid() - self.headers = {'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id} - self.client = websocket.create_connection('ws://localhost:9000/') - self.addCleanup(self.client.close) - - def test_list_empty(self): - self.client.send( - json.dumps({'action': 'queue_list', 'headers': self.headers})) - response = json.loads(self.client.recv()) - self.assertEqual( - {'body': {'queues': []}, - 'headers': {'status': 200}, - 'request': {'action': 'queue_list', 'body': {}, 'api': 'v2', - 'headers': self.headers}}, - response) - - def test_list(self): - self.client.send( - json.dumps({'action': 'queue_create', - 'body': {'queue_name': 'my_queue'}, - 'headers': self.headers})) - response = json.loads(self.client.recv()) - self.assertEqual( - {'body': 'Queue my_queue created.', - 'headers': {'status': 201}, - 'request': {'action': 'queue_create', - 'body': {'queue_name': 'my_queue'}, 'api': 'v2', - 'headers': self.headers}}, - response) - self.client.send( - json.dumps({'action': 'queue_list', 'headers': self.headers})) - response = json.loads(self.client.recv()) - self.assertEqual( - {'body': {'queues': [{'name': 'my_queue'}]}, - 'headers': {'status': 200}, - 'request': {'action': 'queue_list', 'body': {}, 'api': 'v2', - 'headers': self.headers}}, - response) diff --git a/zaqar/tests/functional/wsgi/__init__.py b/zaqar/tests/functional/wsgi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/functional/wsgi/test_versions.py b/zaqar/tests/functional/wsgi/test_versions.py deleted file mode 100644 index 66540aeb..00000000 --- a/zaqar/tests/functional/wsgi/test_versions.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
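The websocket tests above revolve around a simple JSON envelope: each request carries an 'action', an optional 'body', and per-request 'headers'. A standalone sketch using the same websocket-client library (server address assumed):

    import json
    import uuid

    import websocket  # the websocket-client package

    ws = websocket.create_connection('ws://localhost:9000/')
    headers = {'Client-ID': str(uuid.uuid4()), 'X-Project-ID': '123456'}
    ws.send(json.dumps({'action': 'queue_list', 'headers': headers}))
    reply = json.loads(ws.recv())
    # reply: {'body': ..., 'headers': {'status': 200}, 'request': {...}}
    ws.close()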
- -from zaqar.tests.functional import base - - -class TestVersions(base.FunctionalTestBase): - - """Tests for Versions Resource.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestVersions, self).setUp() - self.base_url = "{url}/".format(url=self.cfg.zaqar.url) - self.client.set_base_url(self.base_url) - - def test_get_versions_without_headers(self): - result = self.client.get('', headers={}) - self.assertIn("versions", result.json()) - - def test_get_versions_with_headers(self): - result = self.client.get('') - self.assertIn("versions", result.json()) diff --git a/zaqar/tests/functional/wsgi/v1/__init__.py b/zaqar/tests/functional/wsgi/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/functional/wsgi/v1/test_claims.py b/zaqar/tests/functional/wsgi/v1/test_claims.py deleted file mode 100644 index 0083a434..00000000 --- a/zaqar/tests/functional/wsgi/v1/test_claims.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -import ddt - -from zaqar.tests.functional import base -from zaqar.tests.functional import helpers - - -@ddt.ddt -class TestClaims(base.V1FunctionalTestBase): - """Tests for Claims.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestClaims, self).setUp() - - self.queue = uuid.uuid1() - self.queue_url = ("{url}/{version}/queues/{queue}".format( - url=self.cfg.zaqar.url, - version="v1", - queue=self.queue)) - - self.client.put(self.queue_url) - - self.claim_url = self.queue_url + '/claims' - self.client.set_base_url(self.claim_url) - - # Post Messages - url = self.queue_url + '/messages' - - doc = helpers.create_message_body( - messagecount=self.limits.max_messages_per_page) - - for i in range(10): - result = self.client.post(url, data=doc) - self.assertEqual(201, result.status_code) - - @ddt.data({}, dict(limit=2)) - def test_claim_messages(self, params): - """Claim messages.""" - message_count = params.get('limit', - self.limits.max_messages_per_claim_or_pop) - - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(params=params, data=doc) - self.assertEqual(201, result.status_code) - - actual_message_count = len(result.json()) - self.assertMessageCount(actual_message_count, message_count) - - response_headers = set(result.headers.keys()) - self.assertIsSubset(self.headers_response_with_body, response_headers) - self.assertSchema(result.json(), 'claim_create') - - test_claim_messages.tags = ['smoke', 'positive'] - - def test_query_claim(self): - """Query Claim.""" - params = {'limit': 1} - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(params=params, data=doc) - self.assertEqual(201, result.status_code) - - location = result.headers['Location'] - - url = self.cfg.zaqar.url + location - - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - self.assertSchema(result.json(), 'claim_get') - - test_query_claim.tags = ['smoke', 'positive'] - - def 
test_claim_more_than_allowed(self): - """Claim more than max allowed per request. - - Zaqar allows a maximum of 20 messages per claim by default. - """ - params = {"limit": self.limits.max_messages_per_claim_or_pop + 1} - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(params=params, data=doc) - self.assertEqual(400, result.status_code) - - test_claim_more_than_allowed.tags = ['negative'] - - def test_claim_patch(self): - """Update Claim.""" - # Test Setup - Post Claim - doc = {"ttl": 300, "grace": 400} - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Patch Claim - claim_location = result.headers['Location'] - url = self.cfg.zaqar.url + claim_location - doc_updated = {"ttl": 300} - - result = self.client.patch(url, data=doc_updated) - self.assertEqual(204, result.status_code) - - # Verify that the claim TTL is updated - result = self.client.get(url) - new_ttl = result.json()['ttl'] - self.assertEqual(300, new_ttl) - - test_claim_patch.tags = ['smoke', 'positive'] - - def test_delete_claimed_message(self): - """Delete message belonging to a Claim.""" - # Test Setup - Post claim - doc = {"ttl": 60, "grace": 60} - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Delete Claimed Messages - for rst in result.json(): - href = rst['href'] - url = self.cfg.zaqar.url + href - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - test_delete_claimed_message.tags = ['smoke', 'positive'] - - def test_claim_release(self): - """Release Claim.""" - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Extract claim location and construct the claim URL. - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - - # Release Claim. - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - test_claim_release.tags = ['smoke', 'positive'] - - @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) - def test_claim_invalid_ttl(self, ttl): - """Post Claim with invalid TTL. - - The request JSON body will have a TTL value - outside the allowed range. Allowed TTL values are - 60 <= ttl <= 43200. - """ - doc = {"ttl": ttl, "grace": 100} - - result = self.client.post(data=doc) - self.assertEqual(400, result.status_code) - - test_claim_invalid_ttl.tags = ['negative'] - - @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) - def test_claim_invalid_grace(self, grace): - """Post Claim with invalid grace. - - The request JSON body will have a grace value - outside the allowed range. Allowed grace values are - 60 <= grace <= 43200. - """ - doc = {"ttl": 100, "grace": grace} - - result = self.client.post(data=doc) - self.assertEqual(400, result.status_code) - - test_claim_invalid_grace.tags = ['negative'] - - @ddt.data(0, -100, 30, 10000000000000000000) - def test_claim_invalid_limit(self, limit): - """Post Claim with invalid limit. - - The request url will have a limit outside the allowed range. - Allowed limit values are 0 < limit <= 20 (default max). - """ - params = {"limit": limit} - doc = {"ttl": 100, "grace": 100} - - result = self.client.post(params=params, data=doc) - self.assertEqual(400, result.status_code) - - test_claim_invalid_limit.tags = ['negative'] - - @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) - def test_patch_claim_invalid_ttl(self, ttl): - """Patch Claim with invalid TTL.
- - The request JSON body will have a TTL value - outside the allowed range. Allowed TTL values are - 60 <= ttl <= 43200. - """ - doc = {"ttl": 100, "grace": 100} - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Extract claim location and construct the claim URL. - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - - # Patch Claim. - doc = {"ttl": ttl} - result = self.client.patch(url, data=doc) - self.assertEqual(400, result.status_code) - - test_patch_claim_invalid_ttl.tags = ['negative'] - - def test_query_non_existing_claim(self): - """Query Non Existing Claim.""" - path = '/non-existing-claim' - result = self.client.get(path) - self.assertEqual(404, result.status_code) - - test_query_non_existing_claim.tags = ['negative'] - - def test_patch_non_existing_claim(self): - """Patch Non Existing Claim.""" - path = '/non-existing-claim' - doc = {"ttl": 400} - result = self.client.patch(path, data=doc) - self.assertEqual(404, result.status_code) - - test_patch_non_existing_claim.tags = ['negative'] - - def test_delete_non_existing_claim(self): - """Delete Non Existing Claim.""" - path = '/non-existing-claim' - result = self.client.delete(path) - self.assertEqual(204, result.status_code) - - test_delete_non_existing_claim.tags = ['negative'] - - def tearDown(self): - """Delete Queue after Claim Test.""" - super(TestClaims, self).tearDown() - self.client.delete(self.queue_url) diff --git a/zaqar/tests/functional/wsgi/v1/test_messages.py b/zaqar/tests/functional/wsgi/v1/test_messages.py deleted file mode 100644 index 4306b81e..00000000 --- a/zaqar/tests/functional/wsgi/v1/test_messages.py +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
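Condensing the claim lifecycle exercised above into one plain-requests sketch (base address, queue name, and header values assumed; a 204 is returned instead of 201 when the queue has no messages to claim):

    import requests

    base = 'http://localhost:8888'
    headers = {'Client-ID': 'some-uuid', 'X-Project-ID': '123456'}

    r = requests.post(base + '/v1/queues/demo/claims',
                      json={'ttl': 300, 'grace': 100}, headers=headers)
    if r.status_code == 201:
        claim_url = base + r.headers['Location']
        requests.patch(claim_url, json={'ttl': 400}, headers=headers)   # renew
        requests.delete(claim_url, headers=headers)                     # release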
- -from __future__ import division - -import json -import uuid - -import ddt - -from zaqar.tests.functional import base -from zaqar.tests.functional import helpers - - -@ddt.ddt -class TestMessages(base.V1FunctionalTestBase): - - """Tests for Messages.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestMessages, self).setUp() - - self.queue = uuid.uuid1() - self.queue_url = ("{url}/{version}/queues/{queue}".format( - url=self.cfg.zaqar.url, - version="v1", - queue=self.queue)) - - self.client.put(self.queue_url) - - self.message_url = self.queue_url + '/messages' - self.client.set_base_url(self.message_url) - - def tearDown(self): - self.client.delete(self.queue_url) - super(TestMessages, self).tearDown() - - def _post_large_bulk_insert(self, offset): - """Post two messages whose combined request size is approximately - the max allowed post size plus `offset` bytes.""" - - message1 = {"body": '', "ttl": 300} - message2 = {"body": '', "ttl": 120} - - doc = [message1, message2] - overhead = len(json.dumps(doc)) - - half_size = (self.limits.max_messages_post_size - overhead) // 2 - message1['body'] = helpers.generate_random_string(half_size) - message2['body'] = helpers.generate_random_string(half_size + offset) - - return self.client.post(data=doc) - - def test_message_single_insert(self): - """Insert Single Message into the Queue. - - This test also verifies that claimed messages are - returned (or not) depending on the include_claimed flag. - """ - doc = helpers.create_message_body(messagecount=1) - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - response_headers = set(result.headers.keys()) - self.assertIsSubset(self.headers_response_with_body, response_headers) - - # GET on posted message - href = result.json()['resources'][0] - url = self.cfg.zaqar.url + href - - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - # Compare message metadata - result_body = result.json()['body'] - posted_metadata = doc[0]['body'] - self.assertEqual(posted_metadata, result_body) - - # Post a claim & verify the include_claimed flag.
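# By default, GET /messages hides claimed messages and the requesting
# client's own messages; the request below passes include_claimed=True and
# echo=True to make both visible, which is also why the final listing
# without those parameters returns 204 No Content.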
- url = self.queue_url + '/claims' - doc = {"ttl": 300, "grace": 100} - result = self.client.post(url, data=doc) - self.assertEqual(201, result.status_code) - - params = {'include_claimed': True, - 'echo': True} - result = self.client.get(params=params) - self.assertEqual(200, result.status_code) - - response_message_body = result.json()["messages"][0]["body"] - self.assertEqual(posted_metadata, response_message_body) - - # By default, include_claimed = false - result = self.client.get(self.message_url) - self.assertEqual(204, result.status_code) - - test_message_single_insert.tags = ['smoke', 'positive'] - - def test_message_bulk_insert(self): - """Bulk Insert Messages into the Queue.""" - message_count = self.limits.max_messages_per_page - doc = helpers.create_message_body(messagecount=message_count) - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # GET on posted messages - location = result.headers['location'] - url = self.cfg.zaqar.url + location - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - self.skipTest('Bug #1273335 - Get set of messages returns wrong hrefs ' - '(happens randomly)') - - # Verify that the response json schema matches the expected schema - self.assertSchema(result.json(), 'message_get_many') - - # Compare message metadata - result_body = [result.json()[i]['body'] - for i in range(len(result.json()))] - result_body.sort() - - posted_metadata = [doc[i]['body'] - for i in range(message_count)] - posted_metadata.sort() - - self.assertEqual(posted_metadata, result_body) - - test_message_bulk_insert.tags = ['smoke', 'positive'] - - @ddt.data({}, {'limit': 5}) - def test_get_message(self, params): - """Get Messages.""" - - expected_msg_count = params.get('limit', 10) - - # Test Setup - doc = helpers.create_message_body( - messagecount=self.limits.max_messages_per_page) - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - url = '' - params['echo'] = True - - # Follow the hrefs & perform GET, till the end of messages i.e. 
http 204 is returned - while result.status_code in [201, 200]: - result = self.client.get(url, params=params) - self.assertIn(result.status_code, [200, 204]) - - if result.status_code == 200: - actual_msg_count = len(result.json()['messages']) - self.assertMessageCount(actual_msg_count, expected_msg_count) - - self.assertSchema(result.json(), 'message_list') - - href = result.json()['links'][0]['href'] - url = self.cfg.zaqar.url + href - - self.assertEqual(204, result.status_code) - - test_get_message.tags = ['smoke', 'positive'] - - def test_message_delete(self): - """Delete Message.""" - # Test Setup - doc = helpers.create_message_body(messagecount=1) - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Delete posted message - href = result.json()['resources'][0] - url = self.cfg.zaqar.url + href - - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - result = self.client.get(url) - self.assertEqual(404, result.status_code) - - test_message_delete.tags = ['smoke', 'positive'] - - def test_message_bulk_delete(self): - """Bulk Delete Messages.""" - doc = helpers.create_message_body(messagecount=10) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Delete posted messages - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - result = self.client.get(url) - self.assertEqual(204, result.status_code) - - test_message_bulk_delete.tags = ['smoke', 'positive'] - - def test_message_delete_nonexisting(self): - """Delete non-existing Messages.""" - result = self.client.delete('/non-existing') - - self.assertEqual(204, result.status_code) - - test_message_delete_nonexisting.tags = ['negative'] - - def test_message_partial_delete(self): - """Deleting messages is partially successful.""" - doc = helpers.create_message_body(messagecount=3) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Delete posted message - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - url += ',nonexisting' - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - test_message_partial_delete.tags = ['negative'] - - def test_message_partial_get(self): - """Getting messages is partially successful.""" - doc = helpers.create_message_body(messagecount=3) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Get posted message and a nonexisting message - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - url += ',nonexisting' - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - self.assertSchema(result.json(), "message_get_many") - - test_message_partial_get.tags = ['negative'] - - @ddt.data(-10, -1, 0) - def test_message_bulk_insert_large_bodies(self, offset): - """Insert bulk messages just under the max allowed post size.""" - result = self._post_large_bulk_insert(offset) - self.assertEqual(201, result.status_code) - - test_message_bulk_insert_large_bodies.tags = ['positive'] - - @ddt.data(1, 10) - def test_message_bulk_insert_large_bodies_(self, offset): - """Insert bulk messages just over the max allowed post size.""" - result = self._post_large_bulk_insert(offset) - self.assertEqual(400, result.status_code) - - test_message_bulk_insert_large_bodies_.tags = ['negative'] - - def test_message_bulk_insert_oversized(self): - """Insert more than the max allowed size.""" - - doc = 
'[{{"body": "{0}", "ttl": 300}}, {{"body": "{1}", "ttl": 120}}]' - overhead = len(doc.format('', '')) - - half_size = (self.limits.max_messages_post_size - overhead) // 2 - doc = doc.format(helpers.generate_random_string(half_size), - helpers.generate_random_string(half_size + 1)) - - result = self.client.post(data=doc) - self.assertEqual(400, result.status_code) - - test_message_bulk_insert_oversized.tags = ['negative'] - - @ddt.data(10000000000000000000, -100, 0, 30, -10000000000000000000) - def test_message_get_invalid_limit(self, limit): - """Get Messages with invalid value for limit. - - Allowed values for limit are 0 < limit <= 20(configurable). - """ - params = {'limit': limit} - result = self.client.get(params=params) - self.assertEqual(400, result.status_code) - - test_message_get_invalid_limit.tags = ['negative'] - - def test_message_bulk_delete_negative(self): - """Delete more messages than allowed in a single request. - - By default, max messages that can be deleted in a single - request is 20. - """ - url = (self.message_url + '?ids=' + - ','.join(str(i) for i in - range(self.limits.max_messages_per_page + 1))) - result = self.client.delete(url) - - self.assertEqual(400, result.status_code) - - test_message_bulk_delete_negative.tags = ['negative'] - - def test_message_bulk_get_negative(self): - """GET more messages by id than allowed in a single request. - - By default, max messages that can be fetched in a single - request is 20. - """ - url = (self.message_url + '?ids=' + - ','.join(str(i) for i in - range(self.limits.max_messages_per_page + 1))) - result = self.client.get(url) - - self.assertEqual(400, result.status_code) - - test_message_bulk_get_negative.tags = ['negative'] - - def test_get_messages_malformed_marker(self): - """Get messages with non-existing marker.""" - url = self.message_url + '?marker=invalid' - - result = self.client.get(url) - self.assertEqual(204, result.status_code) - - test_get_messages_malformed_marker.tags = ['negative'] - - @ddt.data(None, '1234', 'aa2-bb3', - '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d81', - '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d') - def test_get_messages_invalid_client_id(self, client_id): - """Get messages with invalid client id.""" - url = self.message_url - - header = helpers.create_zaqar_headers(self.cfg) - header['Client-ID'] = client_id - - result = self.client.get(url, headers=header) - self.assertEqual(400, result.status_code) - - test_get_messages_invalid_client_id.tags = ['negative'] - - def test_query_non_existing_message(self): - """Get Non Existing Message.""" - path = '/non-existing-message' - result = self.client.get(path) - self.assertEqual(404, result.status_code) - - test_query_non_existing_message.tags = ['negative'] - - def test_query_non_existing_message_set(self): - """Get Set of Non Existing Messages.""" - path = '?ids=not_there1,not_there2' - result = self.client.get(path) - self.assertEqual(204, result.status_code) - - test_query_non_existing_message_set.tags = ['negative'] - - def test_delete_non_existing_message(self): - """Delete Non Existing Message.""" - path = '/non-existing-message' - result = self.client.delete(path) - self.assertEqual(204, result.status_code) - - test_delete_non_existing_message.tags = ['negative'] diff --git a/zaqar/tests/functional/wsgi/v1/test_queues.py b/zaqar/tests/functional/wsgi/v1/test_queues.py deleted file mode 100644 index 50a0f772..00000000 --- a/zaqar/tests/functional/wsgi/v1/test_queues.py +++ /dev/null @@ -1,440 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -import ddt -import six - -from zaqar.tests.functional import base # noqa -from zaqar.tests.functional import helpers - - -class NamedBinaryStr(six.binary_type): - - """Wrapper for six.binary_type to facilitate overriding __name__.""" - - -class NamedUnicodeStr(six.text_type): - - """Unicode string look-alike to facilitate overriding __name__.""" - - def __init__(self, value): - self._value = value - - def __str__(self): - return self._value - - def encode(self, enc): - return self._value.encode(enc) - - def __format__(self, formatstr): - """Workaround for ddt bug. - - DDT will always call __format__ even when __name__ exists, - which blows up for Unicode strings under Py2. - """ - return '' - - -class NamedDict(dict): - - """Wrapper for dict to facilitate overriding __name__.""" - - -def annotated(test_name, test_input): - if isinstance(test_input, dict): - annotated_input = NamedDict(test_input) - elif isinstance(test_input, six.text_type): - annotated_input = NamedUnicodeStr(test_input) - else: - annotated_input = NamedBinaryStr(test_input) - - setattr(annotated_input, '__name__', test_name) - return annotated_input - - -@ddt.ddt -class TestInsertQueue(base.V1FunctionalTestBase): - - """Tests for Insert queue.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestInsertQueue, self).setUp() - self.base_url = '{0}/{1}'.format(self.cfg.zaqar.url, - "v1") - - self.header = helpers.create_zaqar_headers(self.cfg) - self.headers_response_empty = {'location'} - self.client.set_base_url(self.base_url) - self.header = helpers.create_zaqar_headers(self.cfg) - - @ddt.data('qtestqueue', 'TESTqueue', 'hyphen-name', '_undersore', - annotated('test_insert_queue_long_name', 'i' * 64)) - def test_insert_queue(self, queue_name): - """Create Queue.""" - self.url = self.base_url + '/queues/' + queue_name - self.addCleanup(self.client.delete, self.url) - - result = self.client.put(self.url) - self.assertEqual(201, result.status_code) - - response_headers = set(result.headers.keys()) - self.assertIsSubset(self.headers_response_empty, response_headers) - - self.url = self.url + '/metadata' - result = self.client.get(self.url) - self.assertEqual(200, result.status_code) - self.assertEqual({}, result.json()) - - test_insert_queue.tags = ['positive', 'smoke'] - - @ddt.data(annotated('test_insert_queue_non_ascii_name', - u'\u6c49\u5b57\u6f22\u5b57'), - '@$@^qw', - annotated('test_insert_queue_invalid_name_length', 'i' * 65)) - def test_insert_queue_invalid_name(self, queue_name): - """Create Queue.""" - if six.PY2 and isinstance(queue_name, NamedUnicodeStr): - queue_name = queue_name.encode('utf-8') - - self.url = self.base_url + '/queues/' + queue_name - self.addCleanup(self.client.delete, self.url) - - result = self.client.put(self.url) - self.assertEqual(400, result.status_code) - - self.url = self.url + '/metadata' - result = self.client.get(self.url) - self.assertEqual(400, result.status_code) - - 
test_insert_queue_invalid_name.tags = ['negative'] - - def test_insert_queue_header_plaintext(self): - """Insert Queue with 'Accept': 'plain/text'.""" - path = '/queues/plaintextheader' - self.addCleanup(self.client.delete, path) - - header = {"Accept": 'plain/text'} - result = self.client.put(path, headers=header) - self.assertEqual(406, result.status_code) - - test_insert_queue_header_plaintext.tags = ['negative'] - - def test_insert_queue_header_asterisk(self): - """Insert Queue with 'Accept': '*/*'.""" - path = '/queues/asteriskinheader' - headers = {"Accept": '*/*'} - self.addCleanup(self.client.delete, url=path, headers=headers) - - result = self.client.put(path, headers=headers) - self.assertEqual(201, result.status_code) - - test_insert_queue_header_asterisk.tags = ['positive'] - - def test_insert_queue_with_metadata(self): - """Insert queue with a non-empty request body.""" - self.url = self.base_url + '/queues/hasmetadata' - doc = {"queue": "Has Metadata"} - self.addCleanup(self.client.delete, self.url) - result = self.client.put(self.url, data=doc) - - self.assertEqual(201, result.status_code) - - self.url = self.base_url + '/queues/hasmetadata/metadata' - result = self.client.get(self.url) - - self.assertEqual(200, result.status_code) - self.assertEqual({}, result.json()) - - test_insert_queue_with_metadata.tags = ['negative'] - - def tearDown(self): - super(TestInsertQueue, self).tearDown() - - -@ddt.ddt -class TestQueueMetaData(base.V1FunctionalTestBase): - - """Tests for queue metadata.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestQueueMetaData, self).setUp() - - self.base_url = '{0}/{1}'.format(self.cfg.zaqar.url, - "v1") - - self.queue_url = self.base_url + '/queues/{0}'.format(uuid.uuid1()) - self.client.put(self.queue_url) - - self.queue_metadata_url = self.queue_url + '/metadata' - self.client.set_base_url(self.queue_metadata_url) - - @ddt.data({}, - {'@queue': 'Top Level field with @'}, - annotated('test_insert_queue_metadata_unicode', { - u'\u6c49\u5b57': u'Unicode: \u6c49\u5b57' - }), - {'queue': '#$%^&Apple'}, - annotated('test_insert_queue_metadata_huge', - {"queue": "i" * 65000})) - def test_insert_queue_metadata(self, doc): - """Insert Queue with empty json.""" - result = self.client.put(data=doc) - self.assertEqual(204, result.status_code) - - result = self.client.get() - self.assertEqual(200, result.status_code) - - doc_decoded = {} - for k, v in doc.items(): - if isinstance(k, six.binary_type): - k = k.decode('utf-8') - - if isinstance(v, six.binary_type): - v = v.decode('utf-8') - - doc_decoded[k] = v - - self.assertEqual(result.json(), doc_decoded) - - test_insert_queue_metadata.tags = ['smoke', 'positive'] - - @ddt.data('not_a_dict', - annotated('test_insert_queue_invalid_metadata_huge', - {"queue": "i" * 65537})) - def test_insert_queue_invalid_metadata(self, doc): - """Insert invalid metadata.""" - - result = self.client.put(data=doc) - self.assertEqual(400, result.status_code) - - test_insert_queue_invalid_metadata.tags = ['negative'] - - def tearDown(self): - super(TestQueueMetaData, self).tearDown() - self.client.delete(self.queue_url) - - -@ddt.ddt -class TestQueueMisc(base.V1FunctionalTestBase): - - server_class = base.ZaqarServer - - def setUp(self): - super(TestQueueMisc, self).setUp() - - self.base_url = self.cfg.zaqar.url - self.client.set_base_url(self.base_url) - - self.queue_url = (self.base_url + '/{0}/queues/{1}' - .format("v1", uuid.uuid1())) - - def test_list_queues(self): - """List Queues.""" - - 
self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - result = self.client.get('/{0}/queues' - .format('v1')) - - self.assertEqual(200, result.status_code) - self.assertSchema(result.json(), 'queue_list') - - test_list_queues.tags = ['smoke', 'positive'] - - def test_list_queues_detailed(self): - """List Queues with detailed = True.""" - - self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - - params = {'detailed': True} - result = self.client.get('/{0}/queues' - .format("v1"), - params=params) - self.assertEqual(200, result.status_code) - self.assertSchema(result.json(), 'queue_list') - - response_keys = result.json()['queues'][0].keys() - self.assertIn('metadata', response_keys) - - test_list_queues_detailed.tags = ['smoke', 'positive'] - - @ddt.data(0, -1, 1001) - def test_list_queue_invalid_limit(self, limit): - """List Queues with a limit value that is not allowed.""" - - params = {'limit': limit} - result = self.client.get('/{0}/queues' - .format("v1"), - params=params) - self.assertEqual(400, result.status_code) - - test_list_queue_invalid_limit.tags = ['negative'] - - def test_check_health(self): - """Test health endpoint.""" - - result = self.client.get('/{0}/health' - .format("v1")) - self.assertEqual(204, result.status_code) - - test_check_health.tags = ['positive'] - - def test_check_queue_exists(self): - """Checks if queue exists.""" - - self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - result = self.client.get(self.queue_url) - self.assertEqual(204, result.status_code) - - result = self.client.head(self.queue_url) - self.assertEqual(204, result.status_code) - - test_check_queue_exists.tags = ['positive'] - - def test_check_queue_exists_negative(self): - """Checks non-existing queue.""" - path = '/{0}/queues/nonexistingqueue'.format("v1") - result = self.client.get(path) - self.assertEqual(404, result.status_code) - - result = self.client.head(path) - self.assertEqual(404, result.status_code) - - test_check_queue_exists_negative.tags = ['negative'] - - def test_get_queue_malformed_marker(self): - """List queues with invalid marker.""" - - path = '/{0}/queues?marker=zzz'.format("v1") - result = self.client.get(path) - self.assertEqual(204, result.status_code) - - test_get_queue_malformed_marker.tags = ['negative'] - - def test_get_stats_empty_queue(self): - """Get queue stats on an empty queue.""" - - result = self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - self.assertEqual(201, result.status_code) - - stats_url = self.queue_url + '/stats' - - # Get stats on an empty queue - result = self.client.get(stats_url) - self.assertEqual(200, result.status_code) - - expected_response = {'messages': - {'claimed': 0, 'total': 0, 'free': 0}} - self.assertEqual(expected_response, result.json()) - - test_get_stats_empty_queue.tags = ['positive'] - - @ddt.data(0, 1) - def test_get_queue_stats_claimed(self, claimed): - """Get stats on a queue.""" - result = self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - self.assertEqual(201, result.status_code) - - # Post Messages to the test queue - doc = helpers.create_message_body( - messagecount=self.limits.max_messages_per_claim_or_pop) - - message_url = self.queue_url + '/messages' - result = self.client.post(message_url, data=doc) - self.assertEqual(201, result.status_code) - - if claimed > 0: - claim_url = self.queue_url + '/claims?limit=' + str(claimed) - doc = 
{'ttl': 300, 'grace': 300} - result = self.client.post(claim_url, data=doc) - self.assertEqual(201, result.status_code) - - # Get stats on the queue. - stats_url = self.queue_url + '/stats' - result = self.client.get(stats_url) - self.assertEqual(200, result.status_code) - - self.assertQueueStats(result.json(), claimed) - - test_get_queue_stats_claimed.tags = ['positive'] - - def tearDown(self): - super(TestQueueMisc, self).tearDown() - - -class TestQueueNonExisting(base.V1FunctionalTestBase): - - """Test Actions on non existing queue.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestQueueNonExisting, self).setUp() - self.base_url = '{0}/{1}'.format(self.cfg.zaqar.url, "v1") - self.queue_url = (self.base_url + - '/queues/0a5b1b85-4263-11e3-b034-28cfe91478b9') - - self.client.set_base_url(self.queue_url) - - self.header = helpers.create_zaqar_headers(self.cfg) - self.headers_response_empty = {'location'} - self.header = helpers.create_zaqar_headers(self.cfg) - - def test_get_queue(self): - """Get non existing Queue.""" - result = self.client.get() - self.assertEqual(404, result.status_code) - - def test_get_stats(self): - """Get stats on non existing Queue.""" - result = self.client.get('/stats') - self.assertEqual(404, result.status_code) - - def test_get_metadata(self): - """Get metadata on non existing Queue.""" - result = self.client.get('/metadata') - self.assertEqual(404, result.status_code) - - def test_get_messages(self): - """Get messages on non existing Queue.""" - result = self.client.get('/messages') - self.assertEqual(204, result.status_code) - - def test_post_messages(self): - """Post messages to a non existing Queue.""" - doc = [{"ttl": 200, "body": {"Home": ""}}] - result = self.client.post('/messages', data=doc) - self.assertEqual(404, result.status_code) - - def test_claim_messages(self): - """Claim messages from a non existing Queue.""" - doc = {"ttl": 200, "grace": 300} - result = self.client.post('/claims', data=doc) - self.assertEqual(204, result.status_code) - - def test_delete_queue(self): - """Delete non existing Queue.""" - result = self.client.delete() - self.assertEqual(204, result.status_code) diff --git a/zaqar/tests/functional/wsgi/v1_1/__init__.py b/zaqar/tests/functional/wsgi/v1_1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/functional/wsgi/v1_1/test_claims.py b/zaqar/tests/functional/wsgi/v1_1/test_claims.py deleted file mode 100644 index edcc29e8..00000000 --- a/zaqar/tests/functional/wsgi/v1_1/test_claims.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
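The claim tests below drive the full claim lifecycle over HTTP: create a claim with a ttl and grace, renew it, and release it. Roughly, that flow looks like the following sketch using the requests library; the host, queue name, and header value are illustrative placeholders rather than values from this suite, and the queue is assumed to already hold messages:

import uuid

import requests

HOST = 'http://localhost:8888'      # hypothetical Zaqar endpoint
QUEUE = HOST + '/v1.1/queues/demo'  # hypothetical queue
HEADERS = {'Client-ID': str(uuid.uuid4())}

# Claim up to two messages, holding them for 300s with a 100s grace period.
resp = requests.post(QUEUE + '/claims', params={'limit': 2},
                     json={'ttl': 300, 'grace': 100}, headers=HEADERS)
assert resp.status_code == 201  # 204 would mean there was nothing to claim
claim_url = HOST + resp.headers['Location']

# Renew the claim (PATCH), then release it early (DELETE).
requests.patch(claim_url, json={'ttl': 300}, headers=HEADERS)
requests.delete(claim_url, headers=HEADERS)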
- -import uuid - -import ddt - -from zaqar.tests.functional import base -from zaqar.tests.functional import helpers - - -@ddt.ddt -class TestClaims(base.V1_1FunctionalTestBase): - """Tests for Claims.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestClaims, self).setUp() - - self.headers = helpers.create_zaqar_headers(self.cfg) - self.client.headers = self.headers - - self.queue = uuid.uuid1() - self.queue_url = ("{url}/{version}/queues/{queue}".format( - url=self.cfg.zaqar.url, - version="v1.1", - queue=self.queue)) - - self.client.put(self.queue_url) - - self.claim_url = self.queue_url + '/claims' - self.client.set_base_url(self.claim_url) - - # Post Messages - url = self.queue_url + '/messages' - doc = helpers.create_message_body_v1_1( - messagecount=self.limits.max_messages_per_page) - - for i in range(10): - self.client.post(url, data=doc) - - @ddt.data({}, {'limit': 2}) - def test_claim_messages(self, params): - """Claim messages.""" - message_count = params.get('limit', - self.limits.max_messages_per_claim_or_pop) - - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(params=params, data=doc) - self.assertEqual(201, result.status_code) - self.assertSchema(result.json(), 'claim_create') - - actual_message_count = len(result.json()['messages']) - self.assertMessageCount(actual_message_count, message_count) - - response_headers = set(result.headers.keys()) - self.assertIsSubset(self.headers_response_with_body, response_headers) - - test_claim_messages.tags = ['smoke', 'positive'] - - def test_query_claim(self): - """Query Claim.""" - params = {'limit': 1} - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(params=params, data=doc) - location = result.headers['Location'] - - url = self.cfg.zaqar.url + location - - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - test_query_claim.tags = ['smoke', 'positive'] - - @ddt.data({}, {"grace": 100}) - def test_claim_default_ttl(self, doc): - """Create claim with default TTL and grace values.""" - params = {'limit': 1} - - result = self.client.post(params=params, data=doc) - self.assertEqual(201, result.status_code) - - location = result.headers['Location'] - - url = self.cfg.zaqar.url + location - - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - default_ttl = result.json()['ttl'] - self.assertEqual(self.resource_defaults.claim_ttl, default_ttl) - - test_claim_default_ttl.tags = ['smoke', 'positive'] - - def test_claim_more_than_allowed(self): - """Claim more than max allowed per request. - - Zaqar allows a maximum of 20 messages per claim by default. 
- """ - params = {"limit": self.limits.max_messages_per_claim_or_pop + 1} - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(params=params, data=doc) - self.assertEqual(400, result.status_code) - - test_claim_more_than_allowed.tags = ['negative'] - - def test_claim_patch(self): - """Update Claim.""" - - # Test Setup - Post Claim - doc = {"ttl": 300, "grace": 400} - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Patch Claim - claim_location = result.headers['Location'] - url = self.cfg.zaqar.url + claim_location - doc_updated = {"ttl": 300, 'grace': 60} - - result = self.client.patch(url, data=doc_updated) - self.assertEqual(204, result.status_code) - - # verify that the claim TTL is updated - result = self.client.get(url) - new_ttl = result.json()['ttl'] - self.assertEqual(doc_updated['ttl'], new_ttl) - - test_claim_patch.tags = ['smoke', 'positive'] - - def test_delete_claimed_message(self): - """Delete message belonging to a Claim.""" - # Test Setup - Post claim - doc = {"ttl": 60, "grace": 60} - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Delete Claimed Messages - for rst in result.json()['messages']: - href = rst['href'] - url = self.cfg.zaqar.url + href - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - test_delete_claimed_message.tags = ['smoke', 'positive'] - - def test_claim_release(self): - """Release Claim.""" - doc = {"ttl": 300, "grace": 100} - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Extract claim location and construct the claim URL. - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - - # Release Claim. - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - test_claim_release.tags = ['smoke', 'positive'] - - @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) - def test_claim_invalid_ttl(self, ttl): - """Post Claim with invalid TTL. - - The request JSON body will have a TTL value - outside the allowed range.Allowed ttl values is - 60 <= ttl <= 43200. - """ - doc = {"ttl": ttl, "grace": 100} - - result = self.client.post(data=doc) - self.assertEqual(400, result.status_code) - - test_claim_invalid_ttl.tags = ['negative'] - - @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) - def test_claim_invalid_grace(self, grace): - """Post Claim with invalid grace. - - The request JSON body will have a grace value - outside the allowed range.Allowed grace values is - 60 <= grace <= 43200. - """ - doc = {"ttl": 100, "grace": grace} - - result = self.client.post(data=doc) - self.assertEqual(400, result.status_code) - - test_claim_invalid_grace.tags = ['negative'] - - @ddt.data(0, -100, 30, 10000000000000000000) - def test_claim_invalid_limit(self, grace): - """Post Claim with invalid limit. - - The request url will have a limit outside the allowed range. - Allowed limit values are 0 < limit <= 20(default max). - """ - doc = {"ttl": 100, "grace": grace} - - result = self.client.post(data=doc) - self.assertEqual(400, result.status_code) - - test_claim_invalid_limit.tags = ['negative'] - - @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) - def test_patch_claim_invalid_ttl(self, ttl): - """Patch Claim with invalid TTL. - - The request JSON body will have a TTL value - outside the allowed range.Allowed ttl values is - 60 <= ttl <= 43200. 
- """ - doc = {"ttl": 100, "grace": 100} - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Extract claim location and construct the claim URL. - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - - # Patch Claim. - doc = {"ttl": ttl} - result = self.client.patch(url, data=doc) - self.assertEqual(400, result.status_code) - - test_patch_claim_invalid_ttl.tags = ['negative'] - - def test_query_non_existing_claim(self): - """Query Non Existing Claim.""" - path = '/non-existing-claim' - result = self.client.get(path) - self.assertEqual(404, result.status_code) - - test_query_non_existing_claim.tags = ['negative'] - - def test_patch_non_existing_claim(self): - """Patch Non Existing Claim.""" - path = '/non-existing-claim' - doc = {"ttl": 400} - result = self.client.patch(path, data=doc) - self.assertEqual(404, result.status_code) - - test_patch_non_existing_claim.tags = ['negative'] - - def test_delete_non_existing_claim(self): - """Patch Non Existing Claim.""" - path = '/non-existing-claim' - result = self.client.delete(path) - self.assertEqual(204, result.status_code) - - test_delete_non_existing_claim.tags = ['negative'] - - def tearDown(self): - """Delete Queue after Claim Test.""" - super(TestClaims, self).tearDown() - self.client.delete(self.queue_url) diff --git a/zaqar/tests/functional/wsgi/v1_1/test_health.py b/zaqar/tests/functional/wsgi/v1_1/test_health.py deleted file mode 100644 index 5e46a3bb..00000000 --- a/zaqar/tests/functional/wsgi/v1_1/test_health.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2014 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from zaqar.tests.functional import base -from zaqar.tests.functional import helpers - - -class TestHealth(base.V1_1FunctionalTestBase): - - server_class = base.ZaqarAdminServer - config_file = 'wsgi_mongodb_pooled.conf' - - def setUp(self): - super(TestHealth, self).setUp() - self.base_url = ("{url}/{version}".format( - url=self.cfg.zaqar.url, - version="v1.1" - )) - self.cfg.zaqar.version = "v1.1" - - self.headers = helpers.create_zaqar_headers(self.cfg) - self.client.headers = self.headers - - self.client.set_base_url(self.base_url) - - def test_health_with_pool(self): - # FIXME(flwang): Please use mongodb after the sqlalchemy is disabled - # as pool node and the mongodb is working on gate successfully. 
- doc = helpers.create_pool_body( - weight=10, - uri=self.mconf['drivers:management_store:mongodb'].uri, - options=dict(database='zaqar_test_pooled_1') - ) - - pool_name = "pool_1" - - result = self.client.put('/pools/' + pool_name, data=doc) - self.assertEqual(201, result.status_code) - - queue_name = 'fake_queue' - result = self.client.put('/queues/' + queue_name) - self.assertEqual(201, result.status_code) - - sample_messages = {'messages': [ - {'body': 239, 'ttl': 999}, - {'body': {'key': 'value'}, 'ttl': 888} - ]} - - result = self.client.post('/queues/%s/messages' % queue_name, - data=sample_messages) - self.assertEqual(201, result.status_code) - - claim_metadata = {'ttl': 100, 'grace': 300} - - result = self.client.post('/queues/%s/claims' % queue_name, - data=claim_metadata) - self.assertEqual(201, result.status_code) - - response = self.client.get('/health') - self.assertEqual(200, response.status_code) - health = response.json() - - self.assertTrue(health['catalog_reachable']) - self.assertTrue(health[pool_name]['storage_reachable']) - op_status = health[pool_name]['operation_status'] - for op in op_status.keys(): - self.assertTrue(op_status[op]['succeeded']) - - message_volume = health[pool_name]['message_volume'] - self.assertEqual(2, message_volume['claimed']) - self.assertEqual(0, message_volume['free']) - self.assertEqual(2, message_volume['total']) diff --git a/zaqar/tests/functional/wsgi/v1_1/test_messages.py b/zaqar/tests/functional/wsgi/v1_1/test_messages.py deleted file mode 100644 index ba89c480..00000000 --- a/zaqar/tests/functional/wsgi/v1_1/test_messages.py +++ /dev/null @@ -1,528 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
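For reference, the health document asserted in test_health_with_pool above has roughly the following shape; this is inferred from those assertions only, so the operation name and the counts shown are illustrative rather than an exhaustive schema:

health = {
    'catalog_reachable': True,
    'pool_1': {
        'storage_reachable': True,
        'operation_status': {
            # one entry per verified operation; this name is hypothetical
            'post_messages': {'succeeded': True},
        },
        'message_volume': {'claimed': 2, 'free': 0, 'total': 2},
    },
}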
-
-from __future__ import division
-
-import json
-import uuid
-
-import ddt
-
-from zaqar.common import consts
-from zaqar.tests.functional import base
-from zaqar.tests.functional import helpers
-
-
-@ddt.ddt
-class TestMessages(base.V1_1FunctionalTestBase):
-    """Message Tests Specific to V1.1."""
-
-    server_class = base.ZaqarServer
-
-    def setUp(self):
-        super(TestMessages, self).setUp()
-
-        self.queue = uuid.uuid1()  # Generate a random queue ID
-        self.queue_url = ("{url}/{version}/queues/{queue}".format(
-            url=self.cfg.zaqar.url,
-            version="v1.1",
-            queue=self.queue))
-
-        self.headers = helpers.create_zaqar_headers(self.cfg)
-        self.client.headers = self.headers
-
-        self.client.put(self.queue_url)  # Create the queue
-        self.message_url = self.queue_url + '/messages'
-        self.client.set_base_url(self.message_url)
-
-    def tearDown(self):
-        self.client.delete(self.queue_url)  # Remove the queue
-        super(TestMessages, self).tearDown()
-
-    def _post_large_bulk_insert(self, offset):
-        """Insert messages with a combined size just under the max allowed."""
-
-        message1 = {"body": '', "ttl": 300}
-        message2 = {"body": '', "ttl": 120}
-
-        doc = {'messages': [message1, message2]}
-        overhead = len(json.dumps(doc))
-
-        half_size = (self.limits.max_messages_post_size - overhead) // 2
-        message1['body'] = helpers.generate_random_string(half_size)
-        message2['body'] = helpers.generate_random_string(half_size + offset)
-
-        return self.client.post(data=doc)
-
-    def test_message_single_insert(self):
-        """Insert Single Message into the Queue.
-
-        This test also verifies that claimed messages are
-        returned (or not) depending on the include_claimed flag.
-        """
-        doc = helpers.create_message_body_v1_1(messagecount=1)
-
-        result = self.client.post(data=doc)
-        self.assertEqual(201, result.status_code)
-
-        response_headers = set(result.headers.keys())
-        self.assertIsSubset(self.headers_response_with_body, response_headers)
-
-        # GET on posted message
-        href = result.json()['resources'][0]
-        url = self.cfg.zaqar.url + href
-
-        result = self.client.get(url)
-        self.assertEqual(200, result.status_code)
-
-        # Compare message metadata
-        result_body = result.json()['body']
-        posted_metadata = doc['messages'][0]['body']
-        self.assertEqual(posted_metadata, result_body)
-
-        # Post a claim & verify the include_claimed flag.
- url = self.queue_url + '/claims' - doc = {"ttl": 300, "grace": 100} - result = self.client.post(url, data=doc) - self.assertEqual(201, result.status_code) - - params = {'include_claimed': True, - 'echo': True} - result = self.client.get(params=params) - self.assertEqual(200, result.status_code) - - response_message_body = result.json()["messages"][0]["body"] - self.assertEqual(posted_metadata, response_message_body) - - # By default, include_claimed = false - result = self.client.get(self.message_url) - self.assertEqual(200, result.status_code) - - test_message_single_insert.tags = ['smoke', 'positive'] - - def test_message_bulk_insert(self): - """Bulk Insert Messages into the Queue.""" - message_count = self.limits.max_messages_per_page - doc = helpers.create_message_body_v1_1(messagecount=message_count) - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # GET on posted messages - location = result.headers['location'] - url = self.cfg.zaqar.url + location - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - # Verify that the response json schema matches the expected schema - self.assertSchema(result.json(), consts.MESSAGE_GET_MANY) - - self.skipTest('Bug #1273335 - Get set of messages returns wrong hrefs ' - '(happens randomly)') - - # Compare message metadata - result_body = [msg['body'] for msg in result.json()['messages']] - result_body.sort() - - posted_metadata = [msg['body'] for msg in doc['messages']] - posted_metadata.sort() - - self.assertEqual(posted_metadata, result_body) - - test_message_bulk_insert.tags = ['smoke', 'positive'] - - def test_message_default_ttl(self): - """Insert Single Message into the Queue using the default TTL.""" - doc = helpers.create_message_body_v1_1(messagecount=1, - default_ttl=True) - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # GET on posted message - href = result.json()['resources'][0] - url = self.cfg.zaqar.url + href - - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - # Compare message metadata - default_ttl = result.json()['ttl'] - self.assertEqual(self.resource_defaults.message_ttl, default_ttl) - - test_message_default_ttl.tags = ['smoke', 'positive'] - - @ddt.data({}, {'limit': 5}) - def test_get_message(self, params): - """Get Messages.""" - - # Note(abettadapur): This will now return 200s and []. - # Needs to be addressed when feature patch goes in - self.skipTest("Not supported") - expected_msg_count = params.get('limit', - self.limits.max_messages_per_page) - - # Test Setup - doc = helpers.create_message_body_v1_1( - messagecount=self.limits.max_messages_per_page) - - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - url = '' - params['echo'] = True - - # Follow the hrefs & perform GET, till the end of messages i.e. 
http - # 204 - while result.status_code in [201, 200]: - result = self.client.get(url, params=params) - self.assertIn(result.status_code, [200, 204]) - - if result.status_code == 200: - actual_msg_count = len(result.json()['messages']) - self.assertMessageCount(actual_msg_count, expected_msg_count) - - href = result.json()['links'][0]['href'] - url = self.cfg.zaqar.url + href - - self.assertEqual(204, result.status_code) - - test_get_message.tags = ['smoke', 'positive'] - - def test_message_delete(self): - """Delete Message.""" - # Test Setup - doc = helpers.create_message_body_v1_1(messagecount=1) - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - - # Delete posted message - href = result.json()['resources'][0] - url = self.cfg.zaqar.url + href - - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - result = self.client.get(url) - self.assertEqual(404, result.status_code) - - test_message_delete.tags = ['smoke', 'positive'] - - def test_message_bulk_delete(self): - """Bulk Delete Messages.""" - doc = helpers.create_message_body_v1_1(messagecount=10) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Delete posted messages - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - result = self.client.get(url) - self.assertEqual(404, result.status_code) - - test_message_bulk_delete.tags = ['smoke', 'positive'] - - def test_message_delete_nonexisting(self): - """Delete non-existing Messages.""" - result = self.client.delete('/non-existing') - - self.assertEqual(204, result.status_code) - - test_message_delete_nonexisting.tags = ['negative'] - - def test_message_partial_delete(self): - """Delete Messages will be partially successful.""" - doc = helpers.create_message_body_v1_1(messagecount=3) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Delete posted message - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - url += ',nonexisting' - result = self.client.delete(url) - self.assertEqual(204, result.status_code) - - test_message_partial_delete.tags = ['negative'] - - @ddt.data(5, 1) - def test_messages_pop(self, limit=5): - """Pop messages from a queue.""" - doc = helpers.create_message_body_v1_1(messagecount=limit) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Pop messages - url = self.message_url + '?pop=' + str(limit) - - result = self.client.delete(url) - self.assertEqual(200, result.status_code) - - params = {'echo': True} - - result = self.client.get(self.message_url, params=params) - self.assertEqual(200, result.status_code) - - messages = result.json()['messages'] - self.assertEqual([], messages) - - test_messages_pop.tags = ['smoke', 'positive'] - - @ddt.data(10000000, 0, -1) - def test_messages_pop_invalid(self, limit): - """Pop messages from a queue.""" - doc = helpers.create_message_body_v1_1( - messagecount=self.limits.max_messages_per_page) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Pop messages - url = self.message_url + '?pop=' + str(limit) - - result = self.client.delete(url) - self.assertEqual(400, result.status_code) - - params = {'echo': True} - result = self.client.get(self.message_url, params=params) - self.assertEqual(200, result.status_code) - - messages = result.json()['messages'] - 
self.assertNotEqual(messages, []) - - test_messages_pop_invalid.tags = ['smoke', 'negative'] - - def test_messages_delete_pop_and_id(self): - """Delete messages with pop & id params in the request.""" - doc = helpers.create_message_body_v1_1( - messagecount=1) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - location = result.headers['Location'] - - # Pop messages - url = self.cfg.zaqar.url + location + '&pop=1' - - result = self.client.delete(url) - self.assertEqual(400, result.status_code) - - params = {'echo': True} - - result = self.client.get(self.message_url, params=params) - self.assertEqual(200, result.status_code) - - messages = result.json()['messages'] - self.assertNotEqual(messages, []) - - test_messages_delete_pop_and_id.tags = ['smoke', 'negative'] - - def test_messages_pop_empty_queue(self): - """Pop messages from an empty queue.""" - url = self.message_url + '?pop=2' - - result = self.client.delete(url) - self.assertEqual(200, result.status_code) - - messages = result.json()['messages'] - self.assertEqual([], messages) - - test_messages_pop_empty_queue.tags = ['smoke', 'positive'] - - def test_messages_pop_one(self): - """Pop single messages from a queue.""" - doc = helpers.create_message_body_v1_1( - messagecount=self.limits.max_messages_per_page) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Pop Single Message - url = self.message_url + '?pop=1' - - result = self.client.delete(url) - self.assertEqual(200, result.status_code) - - # Get messages from the queue & verify message count - params = {'echo': True, 'limit': self.limits.max_messages_per_page} - - result = self.client.get(self.message_url, params=params) - self.assertEqual(200, result.status_code) - - expected_msg_count = self.limits.max_messages_per_page - 1 - actual_msg_count = len(result.json()['messages']) - self.assertEqual(expected_msg_count, actual_msg_count) - - test_messages_pop_one.tags = ['smoke', 'positive'] - - def test_message_partial_get(self): - """Get Messages will be partially successful.""" - doc = helpers.create_message_body_v1_1(messagecount=3) - result = self.client.post(data=doc) - - self.assertEqual(201, result.status_code) - - # Get posted message and a nonexisting message - location = result.headers['Location'] - url = self.cfg.zaqar.url + location - url += ',nonexisting' - result = self.client.get(url) - self.assertEqual(200, result.status_code) - - test_message_partial_get.tags = ['negative'] - - @ddt.data(-10, -1, 0) - def test_message_bulk_insert_large_bodies(self, offset): - """Insert just under than max allowed messages.""" - result = self._post_large_bulk_insert(offset) - self.assertEqual(201, result.status_code) - - test_message_bulk_insert_large_bodies.tags = ['positive'] - - @ddt.data(1, 10) - def test_message_bulk_insert_large_bodies_(self, offset): - """Insert just under than max allowed messages.""" - result = self._post_large_bulk_insert(offset) - self.assertEqual(400, result.status_code) - - test_message_bulk_insert_large_bodies_.tags = ['negative'] - - def test_message_bulk_insert_oversized(self): - """Insert more than max allowed size.""" - - doc = '[{{"body": "{0}", "ttl": 300}}, {{"body": "{1}", "ttl": 120}}]' - overhead = len(doc.format('', '')) - - half_size = (self.limits.max_messages_post_size - overhead) // 2 - doc = doc.format(helpers.generate_random_string(half_size), - helpers.generate_random_string(half_size + 1)) - - result = self.client.post(data=doc) - self.assertEqual(400, 
result.status_code) - - test_message_bulk_insert_oversized.tags = ['negative'] - - @ddt.data(10000000000000000000, -100, 0, 30, -10000000000000000000) - def test_message_get_invalid_limit(self, limit): - """Get Messages with invalid value for limit. - - Allowed values for limit are 0 < limit <= 20(configurable). - """ - params = {'limit': limit} - result = self.client.get(params=params) - self.assertEqual(400, result.status_code) - - test_message_get_invalid_limit.tags = ['negative'] - - def test_message_bulk_delete_negative(self): - """Delete more messages than allowed in a single request. - - By default, max messages that can be deleted in a single - request is 20. - """ - url = (self.message_url + '?ids=' - + ','.join(str(i) for i in - range(self.limits.max_messages_per_page + 1))) - result = self.client.delete(url) - - self.assertEqual(400, result.status_code) - - test_message_bulk_delete_negative.tags = ['negative'] - - def test_message_bulk_get_negative(self): - """GET more messages by id than allowed in a single request. - - By default, max messages that can be fetched in a single - request is 20. - """ - - url = (self.message_url + '?ids=' - + ','.join(str(i) for i in - range(self.limits.max_messages_per_page + 1))) - - result = self.client.get(url) - - self.assertEqual(400, result.status_code) - - test_message_bulk_get_negative.tags = ['negative'] - - def test_get_messages_malformed_marker(self): - """Get messages with non-existing marker.""" - url = self.message_url + '?marker=invalid' - - result = self.client.get(url, headers=self.headers) - self.assertEqual(200, result.status_code) - self.assertSchema(result.json(), 'message_list') - - test_get_messages_malformed_marker.tags = ['negative'] - - @ddt.data(None, '1234', 'aa2-bb3', - '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d81', - '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d') - def test_get_messages_invalid_client_id(self, client_id): - """Get messages with invalid client id.""" - url = self.message_url - - header = helpers.create_zaqar_headers(self.cfg) - header['Client-ID'] = client_id - - result = self.client.get(url, headers=header) - self.assertEqual(400, result.status_code) - - test_get_messages_invalid_client_id.tags = ['negative'] - - def test_query_non_existing_message(self): - """Get Non Existing Message.""" - path = '/non-existing-message' - result = self.client.get(path) - self.assertEqual(404, result.status_code) - - test_query_non_existing_message.tags = ['negative'] - - def test_query_non_existing_message_set(self): - """Get Set of Non Existing Messages.""" - path = '?ids=not_there1,not_there2' - result = self.client.get(path) - self.assertEqual(404, result.status_code) - - test_query_non_existing_message_set.tags = ['negative'] - - def test_delete_non_existing_message(self): - """Delete Non Existing Message.""" - path = '/non-existing-message' - result = self.client.delete(path) - self.assertEqual(204, result.status_code) - - test_delete_non_existing_message.tags = ['negative'] - - def test_message_bad_header_single_insert(self): - """Insert Single Message into the Queue. 
- - This should fail because of the lack of a Client-ID header - """ - - self.skipTest("Not supported") - del self.client.headers["Client-ID"] - doc = helpers.create_message_body_v1_1(messagecount=1) - - result = self.client.post(data=doc) - self.assertEqual(400, result.status_code) diff --git a/zaqar/tests/functional/wsgi/v1_1/test_pools.py b/zaqar/tests/functional/wsgi/v1_1/test_pools.py deleted file mode 100644 index e553637f..00000000 --- a/zaqar/tests/functional/wsgi/v1_1/test_pools.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import ddt - -from zaqar import tests as testing -from zaqar.tests.functional import base -from zaqar.tests.functional import helpers - - -@ddt.ddt -class TestPools(base.V1_1FunctionalTestBase): - - server_class = base.ZaqarAdminServer - config_file = 'wsgi_mongodb_pooled.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestPools, self).setUp() - - self.pool_url = ("{url}/{version}/pools".format( - url=self.cfg.zaqar.url, - version="v1.1" - )) - self.cfg.zaqar.version = "v1.1" - - self.headers = helpers.create_zaqar_headers(self.cfg) - self.client.headers = self.headers - - self.client.set_base_url(self.pool_url) - - @ddt.data( - { - 'name': "newpool", - 'weight': 10 - } - ) - def test_insert_pool(self, params): - """Test the registering of one pool.""" - doc = helpers.create_pool_body( - weight=params.get('weight', 10), - uri=self.mongodb_url - ) - - pool_name = params.get('name', "newpool") - self.addCleanup(self.client.delete, url='/'+pool_name) - - result = self.client.put('/'+pool_name, data=doc) - self.assertEqual(201, result.status_code) - - # Test existence - result = self.client.get('/'+pool_name) - self.assertEqual(200, result.status_code) - - @ddt.data( - { - 'name': "newpool", - 'weight': 10 - } - ) - def test_pool_details(self, params): - """Get the details of a pool. Assert the respective schema.""" - doc = helpers.create_pool_body( - weight=params.get('weight', 10), - uri=self.mongodb_url - ) - - pool_name = params.get('name', "newpool") - self.addCleanup(self.client.delete, url='/'+pool_name) - result = self.client.put('/'+pool_name, data=doc) - self.assertEqual(201, result.status_code) - - # Test existence - result = self.client.get('/'+pool_name+'?detailed=true') - self.assertEqual(200, result.status_code) - self.assertSchema(result.json(), 'pool_get_detail') - - @ddt.data( - { - 'name': "newpool", - 'weight': 10, - } - ) - def test_delete_pool(self, params): - """Create a pool, then delete it. - - Make sure operation is successful. 
- """ - - # Create the pool - doc = helpers.create_pool_body( - weight=params.get('weight', 10), - uri=self.mongodb_url - ) - - pool_name = params.get('name', "newpool") - result = self.client.put('/'+pool_name, data=doc) - self.assertEqual(201, result.status_code) - - # Make sure it exists - result = self.client.get('/'+pool_name) - self.assertEqual(200, result.status_code) - - # Delete it - result = self.client.delete('/'+pool_name) - self.assertEqual(204, result.status_code) - - @ddt.data( - { - 'name': "newpool", - 'weight': 10, - } - ) - def test_list_pools(self, params): - """Add a pool. Get the list of all the pools. - - Assert respective schema - """ - doc = helpers.create_pool_body( - weight=params.get('weight', 10), - uri=self.mongodb_url - ) - pool_name = params.get('name', "newpool") - self.addCleanup(self.client.delete, url='/'+pool_name) - result = self.client.put('/'+pool_name, data=doc) - self.assertEqual(201, result.status_code) - - result = self.client.get() - self.assertEqual(200, result.status_code) - self.assertSchema(result.json(), 'pool_list') - - @ddt.data( - { - 'name': "newpool", - 'weight': 10, - } - ) - def test_patch_pool(self, params): - """Create a pool. Issue a patch command, - - make sure command was successful. Check details to be sure. - """ - - doc = helpers.create_pool_body( - weight=params.get('weight', 10), - uri=self.mongodb_url - ) - pool_name = params.get('name', "newpool") - self.addCleanup(self.client.delete, url='/'+pool_name) - result = self.client.put('/'+pool_name, data=doc) - self.assertEqual(201, result.status_code) - # Update that pool - - patchdoc = helpers.create_pool_body( - weight=5, - uri=self.mongodb_url - ) - result = self.client.patch('/'+pool_name, data=patchdoc) - self.assertEqual(200, result.status_code) - - # Get the pool, check update# - result = self.client.get('/'+pool_name) - self.assertEqual(200, result.status_code) - self.assertEqual(5, result.json()["weight"]) - - @ddt.data( - { - 'name': "newpool", - 'weight': 10, - } - ) - def test_patch_pool_bad_data(self, params): - """Issue a patch command without a body. Assert 400.""" - # create a pool - doc = helpers.create_pool_body( - weight=params.get('weight', 10), - uri=self.mongodb_url - ) - pool_name = params.get('name', "newpool") - self.addCleanup(self.client.delete, url='/'+pool_name) - result = self.client.put('/'+pool_name, data=doc) - self.assertEqual(201, result.status_code) - - # Update pool with bad post data. Ensure 400 - result = self.client.patch('/'+pool_name) - self.assertEqual(400, result.status_code) - - @ddt.data( - { - 'name': "newpool", - 'weight': 10, - } - ) - def test_patch_pool_non_exist(self, params): - """Issue patch command to pool that doesn't exist. Assert 404.""" - doc = helpers.create_pool_body( - weight=5, - uri=self.mongodb_url - ) - result = self.client.patch('/nonexistpool', data=doc) - self.assertEqual(404, result.status_code) - - @ddt.data( - {'name': u'\u6c49\u5b57\u6f22\u5b57'}, - {'name': 'i'*65}, - {'weight': -1} - ) - def test_insert_pool_bad_data(self, params): - """Create pools with invalid names and weights. 
Assert 400.""" - self.skip("FIXME: https://bugs.launchpad.net/zaqar/+bug/1373486") - doc = helpers.create_pool_body( - weight=params.get('weight', 10), - uri=self.mongodb_url - ) - pool_name = params.get('name', "newpool") - self.addCleanup(self.client.delete, url='/'+pool_name) - result = self.client.put('/'+pool_name, data=doc) - self.assertEqual(400, result.status_code) - - def test_delete_pool_non_exist(self): - """Delete a pool that doesn't exist. Assert 404.""" - result = self.client.delete('/nonexistpool') - self.assertEqual(204, result.status_code) diff --git a/zaqar/tests/functional/wsgi/v1_1/test_queues.py b/zaqar/tests/functional/wsgi/v1_1/test_queues.py deleted file mode 100644 index fa418787..00000000 --- a/zaqar/tests/functional/wsgi/v1_1/test_queues.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -import ddt -import six - -from zaqar.tests.functional import base -from zaqar.tests.functional import helpers - - -class NamedBinaryStr(six.binary_type): - - """Wrapper for six.binary_type to facilitate overriding __name__.""" - - -class NamedUnicodeStr(six.text_type): - - """Unicode string look-alike to facilitate overriding __name__.""" - - def __init__(self, value): - self.value = value - - def __str__(self): - return self.value - - def encode(self, enc): - return self.value.encode(enc) - - def __format__(self, formatstr): - """Workaround for ddt bug. - - DDT will always call __format__ even when __name__ exists, - which blows up for Unicode strings under Py2. 
- """ - return '' - - -class NamedDict(dict): - - """Wrapper for dict to facilitate overriding __name__.""" - - -def annotated(test_name, test_input): - if isinstance(test_input, dict): - annotated_input = NamedDict(test_input) - elif isinstance(test_input, six.text_type): - annotated_input = NamedUnicodeStr(test_input) - else: - annotated_input = NamedBinaryStr(test_input) - - setattr(annotated_input, '__name__', test_name) - return annotated_input - - -@ddt.ddt -class TestInsertQueue(base.V1_1FunctionalTestBase): - - """Tests for Insert queue.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestInsertQueue, self).setUp() - self.base_url = '{0}/{1}'.format(self.cfg.zaqar.url, - "v1.1") - - self.header = helpers.create_zaqar_headers(self.cfg) - self.headers_response_empty = {'location'} - self.client.set_base_url(self.base_url) - self.client.headers = self.header - - @ddt.data('qtestqueue', 'TESTqueue', 'hyphen-name', '_undersore', - annotated('test_insert_queue_long_name', 'i' * 64)) - def test_insert_queue(self, queue_name): - """Create Queue.""" - self.url = self.base_url + '/queues/' + queue_name - self.addCleanup(self.client.delete, self.url) - - result = self.client.put(self.url) - self.assertEqual(201, result.status_code) - - response_headers = set(result.headers.keys()) - self.assertIsSubset(self.headers_response_empty, response_headers) - - test_insert_queue.tags = ['positive', 'smoke'] - - @ddt.data(annotated('test_insert_queue_non_ascii_name', - u'\u6c49\u5b57\u6f22\u5b57'), - '@$@^qw', - annotated('test_insert_queue_invalid_name_length', 'i' * 65)) - def test_insert_queue_invalid_name(self, queue_name): - """Create Queue.""" - if six.PY2 and isinstance(queue_name, NamedUnicodeStr): - queue_name = queue_name.encode('utf-8') - - self.url = self.base_url + '/queues/' + queue_name - self.addCleanup(self.client.delete, self.url) - - result = self.client.put(self.url) - self.assertEqual(400, result.status_code) - - test_insert_queue_invalid_name.tags = ['negative'] - - def test_insert_queue_header_plaintext(self): - """Insert Queue with 'Accept': 'plain/text'.""" - path = '/queues/plaintextheader' - self.addCleanup(self.client.delete, path) - - header = {"Accept": 'plain/text'} - result = self.client.put(path, headers=header) - self.assertEqual(406, result.status_code) - - test_insert_queue_header_plaintext.tags = ['negative'] - - def test_insert_queue_header_asterisk(self): - """Insert Queue with 'Accept': '*/*'.""" - path = '/queues/asteriskinheader' - headers = {'Accept': '*/*', - 'Client-ID': str(uuid.uuid4()), - 'X-Project-ID': '518b51ea133c4facadae42c328d6b77b'} - self.addCleanup(self.client.delete, url=path, headers=headers) - - result = self.client.put(path, headers=headers) - self.assertEqual(201, result.status_code) - - test_insert_queue_header_asterisk.tags = ['positive'] - - def test_insert_queue_with_metadata(self): - """Insert queue with a non-empty request body.""" - self.url = self.base_url + '/queues/hasmetadata' - doc = {"queue": "Has Metadata"} - self.addCleanup(self.client.delete, self.url) - result = self.client.put(self.url, data=doc) - - self.assertEqual(201, result.status_code) - - self.url = self.base_url + '/queues/hasmetadata' - result = self.client.get(self.url) - - self.assertEqual(200, result.status_code) - self.assertEqual({"queue": "Has Metadata"}, result.json()) - - test_insert_queue_with_metadata.tags = ['negative'] - - def tearDown(self): - super(TestInsertQueue, self).tearDown() - - -@ddt.ddt -class 
TestQueueMisc(base.V1_1FunctionalTestBase): - - server_class = base.ZaqarServer - - def setUp(self): - super(TestQueueMisc, self).setUp() - self.base_url = self.cfg.zaqar.url - self.client.set_base_url(self.base_url) - - self.queue_url = self.base_url + ('/{0}/queues/{1}' - .format("v1.1", uuid.uuid1())) - - def test_list_queues(self): - """List Queues.""" - - self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - - result = self.client.get('/{0}/queues' - .format("v1.1")) - self.assertEqual(200, result.status_code) - self.assertSchema(result.json(), 'queue_list') - - test_list_queues.tags = ['smoke', 'positive'] - - def test_list_queues_detailed(self): - """List Queues with detailed = True.""" - - self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - - params = {'detailed': True} - result = self.client.get('/{0}/queues' - .format("v1.1"), - params=params) - self.assertEqual(200, result.status_code) - self.assertSchema(result.json(), 'queue_list') - - response_keys = result.json()['queues'][0].keys() - self.assertIn('metadata', response_keys) - - test_list_queues_detailed.tags = ['smoke', 'positive'] - - @ddt.data(0, -1, 1001) - def test_list_queue_invalid_limit(self, limit): - """List Queues with a limit value that is not allowed.""" - - params = {'limit': limit} - result = self.client.get('/{0}/queues' - .format("v1.1"), - params=params) - self.assertEqual(400, result.status_code) - - test_list_queue_invalid_limit.tags = ['negative'] - - def test_check_queue_exists(self): - """Checks if queue exists.""" - - self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - - result = self.client.head(self.queue_url) - self.assertEqual(405, result.status_code) - - test_check_queue_exists.tags = ['negative'] - - def test_get_queue_malformed_marker(self): - """List queues with invalid marker.""" - - path = '/{0}/queues?marker=zzz'.format("v1.1") - result = self.client.get(path) - self.assertEqual(200, result.status_code) - - test_get_queue_malformed_marker.tags = ['negative'] - - def test_get_stats_empty_queue(self): - """Get queue stats on an empty queue.""" - - result = self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - self.assertEqual(201, result.status_code) - - stats_url = self.queue_url + '/stats' - - # Get stats on an empty queue - result = self.client.get(stats_url) - self.assertEqual(200, result.status_code) - - expected_response = {'messages': - {'claimed': 0, 'total': 0, 'free': 0}} - self.assertEqual(expected_response, result.json()) - - test_get_stats_empty_queue.tags = ['positive'] - - @ddt.data(0, 1) - def test_get_queue_stats_claimed(self, claimed): - """Get stats on a queue.""" - result = self.client.put(self.queue_url) - self.addCleanup(self.client.delete, self.queue_url) - self.assertEqual(201, result.status_code) - - # Post Messages to the test queue - doc = helpers.create_message_body_v1_1( - messagecount=self.limits.max_messages_per_claim_or_pop) - - message_url = self.queue_url + '/messages' - result = self.client.post(message_url, data=doc) - self.assertEqual(201, result.status_code) - - if claimed > 0: - claim_url = self.queue_url + '/claims?limit=' + str(claimed) - doc = {'ttl': 300, 'grace': 300} - result = self.client.post(claim_url, data=doc) - self.assertEqual(201, result.status_code) - - # Get stats on the queue. 
- stats_url = self.queue_url + '/stats' - result = self.client.get(stats_url) - self.assertEqual(200, result.status_code) - - self.assertQueueStats(result.json(), claimed) - - test_get_queue_stats_claimed.tags = ['positive'] - - def test_ping_queue(self): - pass - - def tearDown(self): - super(TestQueueMisc, self).tearDown() - - -class TestQueueNonExisting(base.V1_1FunctionalTestBase): - - """Test actions on a non-existing queue.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestQueueNonExisting, self).setUp() - if self.cfg.version != "v1": - self.skipTest("Not Supported") - - self.base_url = '{0}/{1}'.format(self.cfg.zaqar.url, - "v1.1") - self.queue_url = (self.base_url + - '/queues/0a5b1b85-4263-11e3-b034-28cfe91478b9') - self.client.set_base_url(self.queue_url) - - self.header = helpers.create_zaqar_headers(self.cfg) - self.headers_response_empty = {'location'} - - def test_get_stats(self): - """Get stats on a non-existing Queue.""" - result = self.client.get('/stats') - self.assertEqual(200, result.status_code) - self.assertEqual([], result.json()) - - def test_get_metadata(self): - """Get metadata on a non-existing Queue.""" - result = self.client.get('/') - self.assertEqual(200, result.status_code) - self.assertEqual([], result.json()) - - def test_get_messages(self): - """Get messages on a non-existing Queue.""" - result = self.client.get('/messages') - self.assertEqual(200, result.status_code) - self.assertEqual([], result.json()) - - def test_post_messages(self): - """Post messages to a non-existing Queue.""" - doc = [{"ttl": 200, "body": {"Home": ""}}] - result = self.client.post('/messages', data=doc) - self.assertEqual(201, result.status_code) - - # check existence of queue - result = self.client.get() - self.assertEqual(200, result.status_code) - self.assertNotEqual([], result.json()) - - def test_claim_messages(self): - """Claim messages from a non-existing Queue.""" - doc = {"ttl": 200, "grace": 300} - result = self.client.post('/claims', data=doc) - self.assertEqual(200, result.status_code) - self.assertEqual([], result.json()) - - def test_delete_queue(self): - """Delete a non-existing Queue.""" - result = self.client.delete() - self.assertEqual(204, result.status_code) diff --git a/zaqar/tests/functional/wsgi/v2/__init__.py b/zaqar/tests/functional/wsgi/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/functional/wsgi/v2/test_subscriptions.py b/zaqar/tests/functional/wsgi/v2/test_subscriptions.py deleted file mode 100644 index 067da8d0..00000000 --- a/zaqar/tests/functional/wsgi/v2/test_subscriptions.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
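The v2 subscription tests that follow build their request bodies with func_helpers.create_subscription_body(). As a rough sketch of the shape that helper produces (field names taken from the v2 subscription schema later in this patch; the concrete values here are illustrative assumptions):

    # Illustrative subscription-create body; 'subscriber', 'ttl' and
    # 'options' are the fields the v2 subscription schema requires.
    subscription_body = {
        'subscriber': 'http://example.com/endpoint',  # http(s):// or mailto:
        'ttl': 600,   # seconds; the tests below note 60 is the server minimum
        'options': {},
    }
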
- -from __future__ import division - -import time -import uuid - -import ddt - -from zaqar.tests.functional import base -from zaqar.tests.functional import helpers as func_helpers -from zaqar.tests import helpers - - -@ddt.ddt -class TestSubscriptions(base.V2FunctionalTestBase): - - """Tests for Subscriptions.""" - - server_class = base.ZaqarServer - - def setUp(self): - super(TestSubscriptions, self).setUp() - - self.queue_name = uuid.uuid1() - self.queue_url = ("{url}/{version}/queues/{queue}".format( - url=self.cfg.zaqar.url, - version="v2", - queue=self.queue_name)) - - self.client.put(self.queue_url) - - self.subscriptions_url = self.queue_url + '/subscriptions/' - self.client.set_base_url(self.subscriptions_url) - - def tearDown(self): - # Delete test queue subscriptions after each test case. - result = self.client.get(self.subscriptions_url) - subscriptions = result.json()['subscriptions'] - for sub in subscriptions: - sub_url = self.subscriptions_url + sub['id'] - self.client.delete(sub_url) - # Delete test queue. - self.client.delete(self.queue_url) - super(TestSubscriptions, self).tearDown() - - @helpers.is_slow(condition=lambda self: self.class_ttl_gc_interval > 1) - def test_expired_subscription(self): - # Default TTL value is 600. - doc = func_helpers.create_subscription_body() - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - longlive_id = result.json()['subscription_id'] - - # This is the minimum TTL allowed by the server. - ttl_for_shortlive = 60 - doc = func_helpers.create_subscription_body( - subscriber='http://expire.me', ttl=ttl_for_shortlive) - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - shortlive_id = result.json()['subscription_id'] - shortlive_url = self.subscriptions_url + shortlive_id - - # Let's wait for the subscription to expire. - for i in range(self.class_ttl_gc_interval + ttl_for_shortlive): - time.sleep(1) - result = self.client.get(shortlive_url) - if result.status_code == 404: - break - else: - self.fail("Didn't remove the subscription in time.") - - # Make sure the expired subscription is not returned when listing. - result = self.client.get(self.subscriptions_url) - self.assertEqual(200, result.status_code) - subscriptions = result.json()['subscriptions'] - self.assertEqual(1, len(subscriptions)) - self.assertEqual(longlive_id, subscriptions[0]['id']) - - @helpers.is_slow(condition=lambda self: self.class_ttl_gc_interval > 1) - def test_update_ttl(self): - # Default TTL value is 600. - doc = func_helpers.create_subscription_body() - result = self.client.post(data=doc) - self.assertEqual(201, result.status_code) - subscription_id = result.json()['subscription_id'] - subscription_url = self.subscriptions_url + subscription_id - - # This is the minimum TTL allowed by the server. - updated_ttl = 60 - update_fields = { - 'ttl': updated_ttl - } - result = self.client.patch(subscription_url, data=update_fields) - self.assertEqual(204, result.status_code) - - # Let's wait for the updated subscription to expire. - for i in range(self.class_ttl_gc_interval + updated_ttl): - time.sleep(1) - result = self.client.get(subscription_url) - if result.status_code == 404: - break - else: - self.fail("Didn't remove the subscription in time.") - - # Make sure the expired subscription is not returned when listing. 
- result = self.client.get(self.subscriptions_url) - self.assertEqual(200, result.status_code) - subscriptions = result.json()['subscriptions'] - self.assertEqual(0, len(subscriptions)) diff --git a/zaqar/tests/helpers.py b/zaqar/tests/helpers.py deleted file mode 100644 index 647e409b..00000000 --- a/zaqar/tests/helpers.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import contextlib -import functools -import os -import tempfile -import uuid - -import six -import testtools - - -RUN_ALL_TESTS = os.environ.get('ZAQAR_TEST_EVERYTHING') - - -def _test_variable_set(variable): - return os.environ.get(variable, RUN_ALL_TESTS) is None - - -SKIP_SLOW_TESTS = _test_variable_set('ZAQAR_TEST_SLOW') -SKIP_MONGODB_TESTS = _test_variable_set('ZAQAR_TEST_MONGODB') -SKIP_REDIS_TESTS = _test_variable_set('ZAQAR_TEST_REDIS') -SKIP_SWIFT_TESTS = _test_variable_set('ZAQAR_TEST_SWIFT') - - -@contextlib.contextmanager -def expect(*exc_type): - """A context manager to validate raised exceptions. - - Can be used as an alternative to testtools.ExpectedException. - - Notable differences: - 1. This context manager accepts child classes of the - given type, testing that an "except" statement - referencing the given type would indeed catch it when - raised by the statement(s) defined inside the context. - 2. When the expected exception (or a child thereof) is - not raised, this context manager *always* raises - an AssertionError, both when a different exception - is raised, and when no exception is raised at all. - - :param *exc_type: Exception type(s) expected to be raised during - execution of the "with" context. - """ - assert len(exc_type) > 0 - - try: - yield - except exc_type: - pass - else: - raise AssertionError( - 'Not raised: %s' % ', '.join(e.__name__ for e in exc_type)) - - -@contextlib.contextmanager -def partitions(controller, count): - """Context manager to create several partitions for testing. - - The partitions are automatically deleted when the context manager - goes out of scope. - - :param controller: - :param count: int - number of partitions to create - :returns: [(str, int, [str])] - names, weights, hosts - """ - spec = [(six.text_type(uuid.uuid1()), i, - [six.text_type(i)]) for i in range(count)] - for n, w, h in spec: - controller.create(n, w, h) - - yield spec - - for n, _, _ in spec: - controller.delete(n) - - -@contextlib.contextmanager -def partition(controller, name, weight, hosts): - """Context manager to create a single partition for testing. - - The partition is automatically deleted when the context manager - goes out of scope. 
- - :param controller: storage handler - :param name: str - partition name - :param weight: int - partition weight - :param hosts: [str] - hosts associated with this partition - :returns: (str, int, [str]) - name, weight, host used in construction - """ - controller.create(name, weight, hosts) - yield (name, weight, hosts) - controller.delete(name) - - -@contextlib.contextmanager -def entry(controller, project, queue, partition, host, metadata={}): - """Context manager to create a catalogue entry for testing. - - The entry is automatically deleted when the context manager - goes out of scope. - - :param controller: storage handler - :param project: str - namespace for queue - :param queue: str - name of queue - :param partition: str - associated partition - :param host: str - representative host - :param metadata: dict - metadata representation for this entry - :returns: (str, str, str, str, dict) - (project, queue, part, host, meta) - """ - controller.insert(project, queue, partition, host, metadata) - yield (project, queue, partition, host, metadata) - controller.delete(project, queue) - - -@contextlib.contextmanager -def entries(controller, count): - """Context manager to create several catalogue entries for testing. - - The entries are automatically deleted when the context manager - goes out of scope. - - :param controller: storage handler - :param count: int - number of entries to create - :returns: [(str, str, str, str)] - [(project, queue, partition, host)] - """ - spec = [(u'_', six.text_type(uuid.uuid1()), six.text_type(i), - six.text_type(i)) - for i in range(count)] - - for p, q, n, h in spec: - controller.insert(p, q, n, h) - - yield spec - - for p, q, _, _ in spec: - controller.delete(p, q) - - -@contextlib.contextmanager -def pool_entry(controller, project, queue, pool): - """Context manager to create a catalogue entry for testing. - - The entry is automatically deleted when the context manager - goes out of scope. - - :param controller: storage handler - :type controller: queues.storage.base:CatalogueBase - :param project: namespace for queue - :type project: six.text_type - :param queue: name of queue - :type queue: six.text_type - :param pool: an identifier for the pool - :type pool: six.text_type - :returns: (project, queue, pool) - :rtype: (six.text_type, six.text_type, six.text_type) - """ - controller.insert(project, queue, pool) - yield (project, queue, pool) - controller.delete(project, queue) - - -@contextlib.contextmanager -def pool_entries(controller, pool_ctrl, count): - """Context manager to create several catalogue entries for testing. - - The entries are automatically deleted when the context manager - goes out of scope. - - :param controller: storage handler - :type controller: queues.storage.base:CatalogueBase - :param count: number of entries to create - :type count: int - :returns: [(project, queue, pool)] - :rtype: [(six.text_type, six.text_type, six.text_type)] - """ - spec = [(u'_', six.text_type(uuid.uuid1()), six.text_type(i)) - for i in range(count)] - - for p, q, s in spec: - pool_ctrl.create(s, 100, s) - controller.insert(p, q, s) - - yield spec - - for p, q, s in spec: - controller.delete(p, q) - pool_ctrl.delete(s) - - -def requires_mongodb(test_case): - """Decorator to flag a test case as being dependent on MongoDB. - - MongoDB-specific tests will be skipped unless the ZAQAR_TEST_MONGODB - environment variable is set. If the variable is set, the tests will - assume that mongod is running and listening on localhost. 
- """ - - reason = ('Skipping tests that require MongoDB. Ensure ' - 'mongod is running on localhost and then set ' - 'ZAQAR_TEST_MONGODB in order to enable tests ' - 'that are specific to this storage backend. ') - - return testtools.skipIf(SKIP_MONGODB_TESTS, reason)(test_case) - - -def requires_redis(test_case): - """Decorator to flag a test case as being dependent on Redis. - - Redis-specific tests will be skipped unless the ZAQAR_TEST_REDIS - environment variable is set. If the variable is set, the tests will - assume that redis is running and listening on localhost. - """ - - reason = ('Skipping tests that require Redis. Ensure ' - 'Redis is running on localhost and then set ' - 'ZAQAR_TEST_REDIS in order to enable tests ' - 'that are specific to this storage backend. ') - - return testtools.skipIf(SKIP_REDIS_TESTS, reason)(test_case) - - -def requires_swift(test_case): - """Decorator to flag a test case as being dependent on Swift. - - Redis-specific tests will be skipped unless the ZAQAR_TEST_SWIFT - environment variable is set. If the variable is set, the tests will - assume that Swift is accessible and configured properly. - """ - - reason = ('Skipping tests that require Swift. Ensure Swift is running ' - 'and then set ZAQAR_TEST_SWIFT in order to enable tests ' - 'that are specific to this storage backend. ') - - return testtools.skipIf(SKIP_SWIFT_TESTS, reason)(test_case) - - -def is_slow(condition=lambda self: True): - """Decorator to flag slow tests. - - Slow tests will be skipped unless ZAQAR_TEST_SLOW is set, and - condition(self) returns True. - - :param condition: Function that returns True IFF the test will be slow; - useful for child classes which may modify the behavior of a test - such that it may or may not be slow. - """ - - def decorator(test_method): - @functools.wraps(test_method) - def wrapper(self): - if SKIP_SLOW_TESTS and condition(self): - msg = ('Skipping slow test. 
Set ZAQAR_TEST_SLOW ' - 'to enable slow tests.') - - self.skipTest(msg) - - test_method(self) - - return wrapper - - return decorator - - -def override_mongo_conf(conf_file, test): - test_mongo_url = os.environ.get('ZAQAR_TEST_MONGODB_URL') - if test_mongo_url: - parser = six.moves.configparser.ConfigParser() - parser.read(test.conf_path(conf_file)) - sections = ['drivers:management_store:mongodb', - 'drivers:message_store:mongodb'] - for section in sections: - if not parser.has_section(section): - parser.add_section(section) - parser.set(section, 'uri', test_mongo_url) - if not parser.has_section('oslo_policy'): - parser.add_section('oslo_policy') - parser.set('oslo_policy', 'policy_file', test.conf_path('policy.json')) - fd, path = tempfile.mkstemp() - conf_fd = os.fdopen(fd, 'w') - try: - parser.write(conf_fd) - finally: - conf_fd.close() - test.addCleanup(os.remove, path) - return path - else: - return conf_file diff --git a/zaqar/tests/tempest_plugin/__init__.py b/zaqar/tests/tempest_plugin/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/api_schema/__init__.py b/zaqar/tests/tempest_plugin/api_schema/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/api_schema/response/__init__.py b/zaqar/tests/tempest_plugin/api_schema/response/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/api_schema/response/v1/__init__.py b/zaqar/tests/tempest_plugin/api_schema/response/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/api_schema/response/v1/queues.py b/zaqar/tests/tempest_plugin/api_schema/response/v1/queues.py deleted file mode 100644 index bf645f1a..00000000 --- a/zaqar/tests/tempest_plugin/api_schema/response/v1/queues.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
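The response schemas below are consumed by Tempest's rest_client.validate_response(), which checks the HTTP status against 'status_code' and the decoded body against the 'response_body' JSON Schema. A minimal sketch of what that validation amounts to, using the standalone jsonschema package instead of Tempest's wrapper (the sample payload is an illustrative assumption):

    import jsonschema

    sample = {
        'links': [{'rel': 'next', 'href': '/v1/queues?marker=demo'}],
        'queues': [{'name': 'demo', 'href': '/v1/queues/demo'}],
    }
    # Raises jsonschema.ValidationError if the body does not match.
    jsonschema.validate(sample, list_queues['response_body'])
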
- - -list_link = { - 'type': 'object', - 'properties': { - 'rel': {'type': 'string'}, - 'href': { - 'type': 'string', - 'format': 'uri' - } - }, - 'required': ['href', 'rel'] -} - -list_queue = { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'metadata': {'type': 'object'} - }, - 'required': ['name', 'href'] -} - -list_queues = { - 'status_code': [200, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': list_link, - 'maxItems': 1 - }, - 'queues': { - 'type': 'array', - 'items': list_queue - } - }, - 'required': ['links', 'queues'] - } -} - -age = { - 'type': 'number', - 'minimum': 0 -} - -message_link = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'age': age, - 'created': { - 'type': 'string', - 'format': 'date-time' - } - }, - 'required': ['href', 'age', 'created'] -} - -messages = { - 'type': 'object', - 'properties': { - 'free': {'type': 'number'}, - 'claimed': {'type': 'number'}, - 'total': {'type': 'number'}, - 'oldest': message_link, - 'newest': message_link - }, - 'required': ['free', 'claimed', 'total'] -} - -queue_stats = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'messages': messages - }, - 'required': ['messages'] - } -} - -resource_schema = { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 -} - -post_messages = { - 'status_code': [201], - 'response_body': { - 'type': 'object', - 'properties': { - 'resources': resource_schema, - 'partial': {'type': 'boolean'} - }, - 'required': ['resources', 'partial'] - } -} - -message_ttl = { - 'type': 'number', - 'minimum': 1 -} - -list_messages_links = { - 'type': 'array', - 'maxItems': 1, - 'minItems': 1, - 'items': { - 'type': 'object', - 'properties': { - 'rel': {'type': 'string'}, - 'href': {'type': 'string'} - }, - 'required': ['rel', 'href'] - } -} - -list_messages_response = { - 'type': 'array', - 'minItems': 1, - 'items': { - 'type': 'object', - 'properties': { - 'href': {'type': 'string'}, - 'ttl': message_ttl, - 'age': age, - 'body': {'type': 'object'} - }, - 'required': ['href', 'ttl', 'age', 'body'] - } -} - -list_messages = { - 'status_code': [200, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'links': list_messages_links, - 'messages': list_messages_response - }, - 'required': ['links', 'messages'] - } -} - -single_message = { - 'type': 'object', - 'properties': { - 'href': {'type': 'string'}, - 'ttl': message_ttl, - 'age': age, - 'body': {'type': 'object'} - }, - 'required': ['href', 'ttl', 'age', 'body'] -} - -get_single_message = { - 'status_code': [200], - 'response_body': single_message -} - -get_multiple_messages = { - 'status_code': [200], - 'response_body': { - 'type': 'array', - 'items': single_message, - 'minItems': 1 - } -} - -messages_claimed = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'ttl': message_ttl, - 'age': {'type': 'number'}, - 'body': {'type': 'object'} - }, - 'required': ['href', 'ttl', 'age', 'body'] -} - -claim_messages = { - 'status_code': [201, 204], - 'response_body': { - 'type': 'array', - 'items': messages_claimed, - 'minItems': 1 - } -} - -claim_ttl = { - 'type': 'number', - 'minimum': 1 -} - -query_claim = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'age': {'type': 'number'}, - 'ttl': claim_ttl, - 'messages': { - 'type': 'array', - 
'minItems': 1 - } - }, - 'required': ['ttl', 'age', 'messages'] - } -} diff --git a/zaqar/tests/tempest_plugin/api_schema/response/v1_1/__init__.py b/zaqar/tests/tempest_plugin/api_schema/response/v1_1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/api_schema/response/v1_1/queues.py b/zaqar/tests/tempest_plugin/api_schema/response/v1_1/queues.py deleted file mode 100644 index 92b5b6b8..00000000 --- a/zaqar/tests/tempest_plugin/api_schema/response/v1_1/queues.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -list_link = { - 'type': 'object', - 'properties': { - 'rel': {'type': 'string'}, - 'href': { - 'type': 'string', - 'format': 'uri' - } - }, - 'required': ['href', 'rel'] -} - -list_queue = { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'metadata': {'type': 'object'} - }, - 'required': ['name', 'href'] -} - -list_queues = { - 'status_code': [200, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': list_link, - 'maxItems': 1 - }, - 'queues': { - 'type': 'array', - 'items': list_queue - } - }, - 'required': ['links', 'queues'] - } -} - -age = { - 'type': 'number', - 'minimum': 0 -} - -message_link = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'age': age, - 'created': { - 'type': 'string', - 'format': 'date-time' - } - }, - 'required': ['href', 'age', 'created'] -} - -messages = { - 'type': 'object', - 'properties': { - 'free': {'type': 'number'}, - 'claimed': {'type': 'number'}, - 'total': {'type': 'number'}, - 'oldest': message_link, - 'newest': message_link - }, - 'required': ['free', 'claimed', 'total'] -} - -queue_stats = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'messages': messages - }, - 'required': ['messages'] - } -} - -resource_schema = { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 -} - -post_messages = { - 'status_code': [201], - 'response_body': { - 'type': 'object', - 'properties': { - 'resources': resource_schema, - 'partial': {'type': 'boolean'} - }, - 'required': ['resources', 'partial'] - } -} - -message_ttl = { - 'type': 'number', - 'minimum': 1 -} - -list_messages_links = { - 'type': 'array', - 'maxItems': 1, - 'minItems': 1, - 'items': { - 'type': 'object', - 'properties': { - 'rel': {'type': 'string'}, - 'href': {'type': 'string'} - }, - 'required': ['rel', 'href'] - } -} - -list_messages_response = { - 'type': 'array', - 'minItems': 1, - 'items': { - 'type': 'object', - 'properties': { - 'href': {'type': 'string'}, - 'ttl': message_ttl, - 'age': age, - 'body': {'type': 'object'} - }, - 'required': ['href', 'ttl', 'age', 'body'] - } -} - -list_messages = { - 'status_code': [200, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'links': list_messages_links, - 'messages': 
list_messages_response - }, - 'required': ['links', 'messages'] - } -} - -single_message = { - 'type': 'object', - 'properties': { - 'href': {'type': 'string'}, - 'ttl': message_ttl, - 'age': age, - 'body': {'type': 'object'}, - 'id': {'type': 'string'} - }, - 'required': ['href', 'ttl', 'age', 'body', 'id'] -} - -get_single_message = { - 'status_code': [200], - 'response_body': single_message -} - -get_multiple_messages = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": single_message, - "minItems": 1, - } - } - } -} - -messages_claimed = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'ttl': message_ttl, - 'age': {'type': 'number'}, - 'body': {'type': 'object'}, - 'id': {'type': 'string'} - }, - 'required': ['href', 'ttl', 'age', 'body', 'id'] -} - -claim_messages = { - 'status_code': [201, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": single_message, - "minItems": 1, - } - } - } -} - -claim_ttl = { - 'type': 'number', - 'minimum': 1 -} - -query_claim = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'age': {'type': 'number'}, - 'ttl': claim_ttl, - 'messages': { - 'type': 'array', - 'minItems': 1 - } - }, - 'required': ['ttl', 'age', 'messages'] - } -} diff --git a/zaqar/tests/tempest_plugin/api_schema/response/v2/__init__.py b/zaqar/tests/tempest_plugin/api_schema/response/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/api_schema/response/v2/queues.py b/zaqar/tests/tempest_plugin/api_schema/response/v2/queues.py deleted file mode 100644 index e22ffaac..00000000 --- a/zaqar/tests/tempest_plugin/api_schema/response/v2/queues.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
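Note the shape change these schemas encode between API versions: in v1 a claim returns a bare JSON array of claimed messages, while in v1.1 and v2 the messages are wrapped in an object under a 'messages' key and each message carries an 'id'. Illustrative payloads (hrefs and ids are made up):

    v1_claim_response = [
        {'href': '/v1/queues/demo/messages/51db6f78c508f17ddc924357',
         'ttl': 300, 'age': 2, 'body': {'event': 'demo'}},
    ]
    v2_claim_response = {
        'messages': [
            {'href': '/v2/queues/demo/messages/51db6f78c508f17ddc924357',
             'ttl': 300, 'age': 2, 'body': {'event': 'demo'},
             'id': '51db6f78c508f17ddc924357'},
        ],
    }
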
- - -list_link = { - 'type': 'object', - 'properties': { - 'rel': {'type': 'string'}, - 'href': { - 'type': 'string', - 'format': 'uri' - } - }, - 'required': ['href', 'rel'] -} - -list_queue = { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'metadata': {'type': 'object'} - }, - 'required': ['name', 'href'] -} - -list_queues = { - 'status_code': [200, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'links': { - 'type': 'array', - 'items': list_link, - 'maxItems': 1 - }, - 'queues': { - 'type': 'array', - 'items': list_queue - } - }, - 'required': ['links', 'queues'] - } -} - -age = { - 'type': 'number', - 'minimum': 0 -} - -message_link = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'age': age, - 'created': { - 'type': 'string', - 'format': 'date-time' - } - }, - 'required': ['href', 'age', 'created'] -} - -messages = { - 'type': 'object', - 'properties': { - 'free': {'type': 'number'}, - 'claimed': {'type': 'number'}, - 'total': {'type': 'number'}, - 'oldest': message_link, - 'newest': message_link - }, - 'required': ['free', 'claimed', 'total'] -} - -queue_stats = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'messages': messages - }, - 'required': ['messages'] - } -} - -resource_schema = { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 -} - -post_messages = { - 'status_code': [201], - 'response_body': { - 'type': 'object', - 'properties': { - 'resources': resource_schema, - 'partial': {'type': 'boolean'} - }, - 'required': ['resources', 'partial'] - } -} - -message_ttl = { - 'type': 'number', - 'minimum': 1 -} - -list_messages_links = { - 'type': 'array', - 'maxItems': 1, - 'minItems': 0, - 'items': { - 'type': 'object', - 'properties': { - 'rel': {'type': 'string'}, - 'href': {'type': 'string'} - }, - 'required': ['rel', 'href'] - } -} - -list_messages_response = { - 'type': 'array', - 'minItems': 0, - 'items': { - 'type': 'object', - 'properties': { - 'href': {'type': 'string'}, - 'ttl': message_ttl, - 'age': age, - 'body': {'type': 'object'} - }, - 'required': ['href', 'ttl', 'age', 'body'] - } -} - -list_messages = { - 'status_code': [200, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'links': list_messages_links, - 'messages': list_messages_response - }, - 'required': ['links', 'messages'] - } -} - -single_message = { - 'type': 'object', - 'properties': { - 'href': {'type': 'string'}, - 'ttl': message_ttl, - 'age': age, - 'body': {'type': 'object'}, - 'id': {'type': 'string'} - }, - 'required': ['href', 'ttl', 'age', 'body', 'id'] -} - -get_single_message = { - 'status_code': [200], - 'response_body': single_message -} - -get_multiple_messages = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": single_message, - "minItems": 1, - } - } - } -} - -messages_claimed = { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'ttl': message_ttl, - 'age': {'type': 'number'}, - 'body': {'type': 'object'}, - 'id': {'type': 'string'} - }, - 'required': ['href', 'ttl', 'age', 'body', 'id'] -} - -claim_messages = { - 'status_code': [201, 204], - 'response_body': { - 'type': 'object', - 'properties': { - 'messages': { - "type": "array", - "items": single_message, - "minItems": 1, - } - } - } -} - -claim_ttl = { - 'type': 'number', - 'minimum': 
1 -} - -query_claim = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'age': {'type': 'number'}, - 'ttl': claim_ttl, - 'messages': { - 'type': 'array', - 'minItems': 1 - } - }, - 'required': ['ttl', 'age', 'messages'] - } -} - -create_subscription = { - 'status_code': [201], - 'response_body': { - 'type': 'object', - 'properties': { - 'subscription_id': {'type': 'string'}, - }, - 'required': ['subscription_id'] - } -} - -single_subscription = { - 'type': 'object', - 'properties': { - 'subscriber': {'type': 'string'}, - 'source': {'type': 'string'}, - 'options': {'type': 'object'}, - 'id': {'type': 'string'}, - 'ttl': message_ttl, - }, - 'required': ['subscriber', 'source', 'options', 'id', 'ttl'] -} - -show_single_subscription = { - 'status_code': [200], - 'response_body': single_subscription -} - -list_subscriptions = { - 'status_code': [200], - 'response_body': { - 'type': 'object', - 'properties': { - 'subscriptions': { - "type": "array", - "items": single_subscription, - }, - 'links': { - 'type': 'array', - 'items': list_link, - 'maxItems': 1 - }, - }, - 'required': ['subscriptions', 'links'] - } -} diff --git a/zaqar/tests/tempest_plugin/config.py b/zaqar/tests/tempest_plugin/config.py deleted file mode 100644 index eb7c5b66..00000000 --- a/zaqar/tests/tempest_plugin/config.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2016 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg - -service_option = cfg.BoolOpt('zaqar', - default=True, - help="Whether or not Zaqar is expected to be " - "available") - -messaging_group = cfg.OptGroup(name='messaging', - title='Messaging Service') - -MessagingGroup = [ - cfg.StrOpt('catalog_type', - default='messaging', - help='Catalog type of the Messaging service.'), - cfg.IntOpt('max_queues_per_page', - default=20, - help='The maximum number of queue records per page when ' - 'listing queues'), - cfg.IntOpt('max_queue_metadata', - default=65536, - help='The maximum metadata size for a queue'), - cfg.IntOpt('max_messages_per_page', - default=20, - help='The maximum number of queue messages per page when ' - 'listing or posting messages'), - cfg.IntOpt('max_message_size', - default=262144, - help='The maximum size of a message body'), - cfg.IntOpt('max_messages_per_claim', - default=20, - help='The maximum number of messages per claim'), - cfg.IntOpt('max_message_ttl', - default=1209600, - help='The maximum ttl for a message'), - cfg.IntOpt('max_claim_ttl', - default=43200, - help='The maximum ttl for a claim'), - cfg.IntOpt('max_claim_grace', - default=43200, - help='The maximum grace period for a claim'), -] diff --git a/zaqar/tests/tempest_plugin/plugin.py b/zaqar/tests/tempest_plugin/plugin.py deleted file mode 100644 index 70ffad4b..00000000 --- a/zaqar/tests/tempest_plugin/plugin.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2016 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os - -from tempest.test_discover import plugins - -from zaqar.tests.tempest_plugin import config as zaqar_config - - -class ZaqarTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(__file__)))[0] - # Note: base_path should be set to the top directory - # of zaqar. - base_path += '/../..' - test_dir = "zaqar/tests/tempest_plugin/tests" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - conf.register_group(zaqar_config.messaging_group) - conf.register_opts(zaqar_config.MessagingGroup, group='messaging') - conf.register_opt(zaqar_config.service_option, - group='service_available') - - def get_opt_lists(self): - return [('messaging', zaqar_config.MessagingGroup), - ('service_available', [zaqar_config.service_option])] diff --git a/zaqar/tests/tempest_plugin/services/__init__.py b/zaqar/tests/tempest_plugin/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/services/messaging/__init__.py b/zaqar/tests/tempest_plugin/services/messaging/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/services/messaging/json/__init__.py b/zaqar/tests/tempest_plugin/services/messaging/json/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/services/messaging/json/messaging_client.py b/zaqar/tests/tempest_plugin/services/messaging/json/messaging_client.py deleted file mode 100644 index f1ec7594..00000000 --- a/zaqar/tests/tempest_plugin/services/messaging/json/messaging_client.py +++ /dev/null @@ -1,506 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
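Every request the clients below issue carries a Client-ID header, generated once per client instance via uuidutils; the v1.1 and v2 APIs reject requests without it. A rough standalone sketch of the same interaction outside Tempest, assuming the plain requests library and Zaqar's default endpoint (the URL, port, and project id are illustrative assumptions; the project id value is borrowed from the functional tests earlier in this patch):

    import uuid

    import requests

    headers = {
        'Client-ID': str(uuid.uuid4()),
        'X-Project-ID': '518b51ea133c4facadae42c328d6b77b',
        'Content-Type': 'application/json',
    }
    # 201 on first creation, mirroring create_queue() below.
    resp = requests.put('http://localhost:8888/v2/queues/demo',
                        headers=headers)
    assert resp.status_code == 201
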
- -from oslo_utils import uuidutils - -from oslo_serialization import jsonutils as json -from six.moves.urllib import parse as urllib -from tempest.lib.common import rest_client - -from zaqar.tests.tempest_plugin.api_schema.response.v1 \ - import queues as v1schema -from zaqar.tests.tempest_plugin.api_schema.response.v1_1 \ - import queues as v11schema -from zaqar.tests.tempest_plugin.api_schema.response.v2 \ - import queues as v2schema - - -class MessagingClient(rest_client.RestClient): - - def __init__(self, auth_provider, service, region, **kwargs): - super(MessagingClient, self).__init__( - auth_provider, service, region, **kwargs) - - self.version = '1' - self.uri_prefix = 'v{0}'.format(self.version) - - client_id = uuidutils.generate_uuid(dashed=False) - self.headers = {'Client-ID': client_id} - - -class V1MessagingClient(MessagingClient): - def __init__(self, auth_provider, service, region, **kwargs): - super(V1MessagingClient, self).__init__( - auth_provider, service, region, **kwargs) - - self.version = '1' - - def list_queues(self): - uri = '{0}/queues'.format(self.uri_prefix) - resp, body = self.get(uri) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v1schema.list_queues, resp, body) - return resp, body - - def create_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.put(uri, body=None) - self.expected_success(201, resp.status) - return resp, body - - def show_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri) - self.expected_success(204, resp.status) - return resp, body - - def head_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.head(uri) - self.expected_success(204, resp.status) - return resp, body - - def delete_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.delete(uri) - self.expected_success(204, resp.status) - return resp, body - - def show_queue_stats(self, queue_name): - uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri) - body = json.loads(body) - self.validate_response(v1schema.queue_stats, resp, body) - return resp, body - - def show_queue_metadata(self, queue_name): - uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = json.loads(body) - return resp, body - - def set_queue_metadata(self, queue_name, rbody): - uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name) - resp, body = self.put(uri, body=json.dumps(rbody)) - self.expected_success(204, resp.status) - return resp, body - - def post_messages(self, queue_name, rbody): - uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name) - resp, body = self.post(uri, body=json.dumps(rbody), - extra_headers=True, - headers=self.headers) - - body = json.loads(body) - self.validate_response(v1schema.post_messages, resp, body) - return resp, body - - def list_messages(self, queue_name): - uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix, - queue_name) - resp, body = self.get(uri, extra_headers=True, headers=self.headers) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v1schema.list_messages, resp, body) - - return resp, body - - def show_single_message(self, message_uri): - resp, body = self.get(message_uri, extra_headers=True, - 
headers=self.headers) - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v1schema.get_single_message, resp, - body) - return resp, body - - def show_multiple_messages(self, message_uri): - resp, body = self.get(message_uri, extra_headers=True, - headers=self.headers) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v1schema.get_multiple_messages, - resp, - body) - - return resp, body - - def delete_messages(self, message_uri): - resp, body = self.delete(message_uri) - self.expected_success(204, resp.status) - return resp, body - - def post_claims(self, queue_name, rbody, url_params=False): - uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name) - if url_params: - uri += '?%s' % urllib.urlencode(url_params) - - resp, body = self.post(uri, body=json.dumps(rbody), - extra_headers=True, - headers=self.headers) - - body = json.loads(body) - self.validate_response(v1schema.claim_messages, resp, body) - return resp, body - - def query_claim(self, claim_uri): - resp, body = self.get(claim_uri) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v1schema.query_claim, resp, body) - return resp, body - - def update_claim(self, claim_uri, rbody): - resp, body = self.patch(claim_uri, body=json.dumps(rbody)) - self.expected_success(204, resp.status) - return resp, body - - def delete_claim(self, claim_uri): - resp, body = self.delete(claim_uri) - self.expected_success(204, resp.status) - return resp, body - - -class V11MessagingClient(MessagingClient): - def __init__(self, auth_provider, service, region, **kwargs): - super(V11MessagingClient, self).__init__( - auth_provider, service, region, **kwargs) - - self.version = '1.1' - self.uri_prefix = 'v{0}'.format(self.version) - - def list_queues(self): - uri = '{0}/queues'.format(self.uri_prefix) - resp, body = self.get(uri, headers=self.headers) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v11schema.list_queues, resp, body) - return resp, body - - def create_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.put(uri, body=None, headers=self.headers) - self.expected_success(201, resp.status) - return resp, body - - def show_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri, headers=self.headers) - self.expected_success(200, resp.status) - return resp, body - - def delete_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.delete(uri, headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def show_queue_stats(self, queue_name): - uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri, headers=self.headers) - body = json.loads(body) - self.validate_response(v11schema.queue_stats, resp, body) - return resp, body - - def show_queue_metadata(self, queue_name): - uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri, headers=self.headers) - self.expected_success(200, resp.status) - body = json.loads(body) - return resp, body - - def set_queue_metadata(self, queue_name, rbody): - uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name) - resp, body = self.put(uri, body=json.dumps(rbody), - headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def post_messages(self, queue_name, rbody): - uri = 
'{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name) - resp, body = self.post(uri, body=json.dumps(rbody), - extra_headers=True, - headers=self.headers) - - body = json.loads(body) - self.validate_response(v11schema.post_messages, resp, body) - return resp, body - - def list_messages(self, queue_name): - uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix, - queue_name) - resp, body = self.get(uri, extra_headers=True, headers=self.headers) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v11schema.list_messages, resp, body) - - return resp, body - - def show_single_message(self, message_uri): - resp, body = self.get(message_uri, extra_headers=True, - headers=self.headers) - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v11schema.get_single_message, resp, - body) - return resp, body - - def show_multiple_messages(self, message_uri): - resp, body = self.get(message_uri, extra_headers=True, - headers=self.headers) - - if resp['status'] != '404': - body = json.loads(body) - self.validate_response(v11schema.get_multiple_messages, - resp, - body) - - return resp, body - - def delete_messages(self, message_uri): - resp, body = self.delete(message_uri, headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def post_claims(self, queue_name, rbody, url_params=False): - uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name) - if url_params: - uri += '?%s' % urllib.urlencode(url_params) - - resp, body = self.post(uri, body=json.dumps(rbody), - extra_headers=True, - headers=self.headers) - - body = json.loads(body) - self.validate_response(v11schema.claim_messages, resp, body) - return resp, body - - def query_claim(self, claim_uri): - resp, body = self.get(claim_uri, headers=self.headers) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v11schema.query_claim, resp, body) - return resp, body - - def update_claim(self, claim_uri, rbody): - resp, body = self.patch(claim_uri, body=json.dumps(rbody), - headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def delete_claim(self, claim_uri): - resp, body = self.delete(claim_uri, headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - -class V2MessagingClient(MessagingClient): - def __init__(self, auth_provider, service, region, **kwargs): - super(V2MessagingClient, self).__init__( - auth_provider, service, region, **kwargs) - - self.version = '2' - self.uri_prefix = 'v{0}'.format(self.version) - - def list_queues(self, url_params=False): - uri = '{0}/queues'.format(self.uri_prefix) - if url_params: - uri += '?%s' % urllib.urlencode(url_params) - - resp, body = self.get(uri, headers=self.headers) - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v2schema.list_queues, resp, body) - return resp, body - - def create_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.put(uri, body=None, headers=self.headers) - self.expected_success(201, resp.status) - return resp, body - - def show_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri, headers=self.headers) - self.expected_success(200, resp.status) - return resp, body - - def delete_queue(self, queue_name): - uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name) - resp, body = self.delete(uri, headers=self.headers) - 
self.expected_success(204, resp.status) - return resp, body - - def purge_queue(self, queue_name, resource=None): - uri = '{0}/queues/{1}/purge'.format(self.uri_prefix, queue_name) - rbody = {"resource_types": ["messages", "subscriptions"]} - if resource: - rbody = {"resource_types": resource} - resp, body = self.post(uri, body=json.dumps(rbody), - headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def show_queue_stats(self, queue_name): - uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri, headers=self.headers) - body = json.loads(body) - self.validate_response(v2schema.queue_stats, resp, body) - return resp, body - - def show_queue_metadata(self, queue_name): - uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name) - resp, body = self.get(uri, headers=self.headers) - self.expected_success(200, resp.status) - body = json.loads(body) - return resp, body - - def set_queue_metadata(self, queue_name, rbody): - uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name) - resp, body = self.put(uri, body=json.dumps(rbody), - headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def post_messages(self, queue_name, rbody): - uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name) - resp, body = self.post(uri, body=json.dumps(rbody), - extra_headers=True, - headers=self.headers) - - body = json.loads(body) - self.validate_response(v2schema.post_messages, resp, body) - return resp, body - - def list_messages(self, queue_name): - uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix, - queue_name) - resp, body = self.get(uri, extra_headers=True, headers=self.headers) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v2schema.list_messages, resp, body) - - return resp, body - - def show_single_message(self, message_uri): - resp, body = self.get(message_uri, extra_headers=True, - headers=self.headers) - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v2schema.get_single_message, resp, - body) - return resp, body - - def show_multiple_messages(self, message_uri): - resp, body = self.get(message_uri, extra_headers=True, - headers=self.headers) - - if resp['status'] != '404': - body = json.loads(body) - self.validate_response(v2schema.get_multiple_messages, - resp, - body) - - return resp, body - - def delete_messages(self, message_uri): - resp, body = self.delete(message_uri, headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def post_claims(self, queue_name, rbody, url_params=False): - uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name) - if url_params: - uri += '?%s' % urllib.urlencode(url_params) - - resp, body = self.post(uri, body=json.dumps(rbody), - extra_headers=True, - headers=self.headers) - - body = json.loads(body) - self.validate_response(v2schema.claim_messages, resp, body) - return resp, body - - def query_claim(self, claim_uri): - resp, body = self.get(claim_uri, headers=self.headers) - - if resp['status'] != '204': - body = json.loads(body) - self.validate_response(v2schema.query_claim, resp, body) - return resp, body - - def update_claim(self, claim_uri, rbody): - resp, body = self.patch(claim_uri, body=json.dumps(rbody), - headers=self.headers) - self.expected_success(204, resp.status) - return resp, body - - def delete_claim(self, claim_uri): - resp, body = self.delete(claim_uri, headers=self.headers) - 
self.expected_success(204, resp.status) - return resp, body - - def create_subscription(self, queue_name, rbody): - uri = '{0}/queues/{1}/subscriptions'.format(self.uri_prefix, - queue_name) - - resp, body = self.post(uri, body=json.dumps(rbody), - extra_headers=True, - headers=self.headers) - body = json.loads(body) - self.validate_response(v2schema.create_subscription, resp, body) - return resp, body - - def delete_subscription(self, queue_name, subscription_id): - uri = '{0}/queues/{1}/subscriptions/{2}'.format(self.uri_prefix, - queue_name, - subscription_id) - resp, body = self.delete(uri, headers=self.headers) - return resp, body - - def list_subscription(self, queue_name): - uri = '{0}/queues/{1}/subscriptions/'.format(self.uri_prefix, - queue_name) - resp, body = self.get(uri, headers=self.headers) - body = json.loads(body) - self.validate_response(v2schema.list_subscriptions, resp, body) - return resp, body - - def show_subscription(self, queue_name, subscription_id): - uri = '{0}/queues/{1}/subscriptions/{2}'.format(self.uri_prefix, - queue_name, - subscription_id) - resp, body = self.get(uri, headers=self.headers) - body = json.loads(body) - self.validate_response(v2schema.show_single_subscription, resp, body) - return resp, body - - def update_subscription(self, queue_name, subscription_id, rbody): - uri = '{0}/queues/{1}/subscriptions/{2}'.format(self.uri_prefix, - queue_name, - subscription_id) - resp, body = self.patch(uri, body=json.dumps(rbody), - headers=self.headers) - return resp, body diff --git a/zaqar/tests/tempest_plugin/tests/__init__.py b/zaqar/tests/tempest_plugin/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/tests/base.py b/zaqar/tests/tempest_plugin/tests/base.py deleted file mode 100644 index df428291..00000000 --- a/zaqar/tests/tempest_plugin/tests/base.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
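The base classes below assume the options registered in config.py earlier in this patch. Under that assumption, a matching tempest.conf would contain something like the following (values shown are the defaults from config.py):

    [service_available]
    zaqar = True

    [messaging]
    catalog_type = messaging
    max_messages_per_page = 20
    max_message_ttl = 1209600
    max_claim_ttl = 43200
    max_claim_grace = 43200
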
- -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest import test - -from zaqar.tests.tempest_plugin.services.messaging.json import messaging_client - -CONF = config.CONF - - -class BaseMessagingTest(test.BaseTestCase): - - """Base class for the Messaging (Zaqar) tests - - It is assumed that the following option is defined in the - [service_available] section of etc/tempest.conf - - zaqar as True - """ - - credentials = ['primary'] - - @classmethod - def skip_checks(cls): - super(BaseMessagingTest, cls).skip_checks() - if not CONF.service_available.zaqar: - raise cls.skipException("Zaqar support is required") - - @classmethod - def resource_setup(cls): - super(BaseMessagingTest, cls).resource_setup() - cls.messaging_cfg = CONF.messaging - - @classmethod - def create_queue(cls, queue_name): - """Wrapper utility that returns a test queue.""" - resp, body = cls.client.create_queue(queue_name) - return resp, body - - @classmethod - def delete_queue(cls, queue_name): - """Wrapper utility that deletes a test queue.""" - resp, body = cls.client.delete_queue(queue_name) - return resp, body - - @classmethod - def list_queues(cls): - """Wrapper utility that lists queues.""" - resp, body = cls.client.list_queues() - return resp, body - - @classmethod - def get_queue_stats(cls, queue_name): - """Wrapper utility that returns the queue stats.""" - resp, body = cls.client.show_queue_stats(queue_name) - return resp, body - - @classmethod - def get_queue_metadata(cls, queue_name): - """Wrapper utility that gets a queue metadata.""" - resp, body = cls.client.show_queue_metadata(queue_name) - return resp, body - - @classmethod - def set_queue_metadata(cls, queue_name, rbody): - """Wrapper utility that sets the metadata of a queue.""" - resp, body = cls.client.set_queue_metadata(queue_name, rbody) - return resp, body - - @classmethod - def post_messages(cls, queue_name, rbody): - """Wrapper utility that posts messages to a queue.""" - resp, body = cls.client.post_messages(queue_name, rbody) - - return resp, body - - @classmethod - def list_messages(cls, queue_name): - """Wrapper utility that lists the messages in a queue.""" - resp, body = cls.client.list_messages(queue_name) - - return resp, body - - @classmethod - def delete_messages(cls, message_uri): - """Wrapper utility that deletes messages.""" - resp, body = cls.client.delete_messages(message_uri) - - return resp, body - - @classmethod - def post_claims(cls, queue_name, rbody, url_params=False): - """Wrapper utility that claims messages.""" - resp, body = cls.client.post_claims( - queue_name, rbody, url_params=url_params) - - return resp, body - - @classmethod - def query_claim(cls, claim_uri): - """Wrapper utility that gets a claim.""" - resp, body = cls.client.query_claim(claim_uri) - - return resp, body - - @classmethod - def update_claim(cls, claim_uri, rbody): - """Wrapper utility that updates a claim.""" - resp, body = cls.client.update_claim(claim_uri, rbody) - - return resp, body - - @classmethod - def release_claim(cls, claim_uri): - """Wrapper utility that deletes a claim.""" - resp, body = cls.client.delete_claim(claim_uri) - - return resp, body - - @classmethod - def generate_message_body(cls, repeat=1): - """Wrapper utility that generates a test message body.""" - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - 
message_body = {key: value} - - rbody = ([{'body': message_body, 'ttl': message_ttl}] * repeat) - return rbody - - -class BaseV1MessagingTest(BaseMessagingTest): - """Base class for the Messaging (Zaqar) v1.0 tests.""" - @classmethod - def setup_clients(cls): - super(BaseV1MessagingTest, cls).setup_clients() - cls.client = messaging_client.V1MessagingClient( - cls.os.auth_provider, - CONF.messaging.catalog_type, - CONF.identity.region, - build_interval=CONF.compute.build_interval, - build_timeout=CONF.compute.build_timeout) - - @classmethod - def check_queue_exists(cls, queue_name): - """Wrapper utility that checks the existence of a test queue.""" - resp, body = cls.client.show_queue(queue_name) - return resp, body - - @classmethod - def check_queue_exists_head(cls, queue_name): - """Wrapper utility checks the head of a queue via http HEAD.""" - resp, body = cls.client.head_queue(queue_name) - return resp, body - - -class BaseV11MessagingTest(BaseMessagingTest): - """Base class for the Messaging (Zaqar) v1.1 tests.""" - @classmethod - def setup_clients(cls): - super(BaseV11MessagingTest, cls).setup_clients() - cls.client = messaging_client.V11MessagingClient( - cls.os.auth_provider, - CONF.messaging.catalog_type, - CONF.identity.region, - build_interval=CONF.compute.build_interval, - build_timeout=CONF.compute.build_timeout) - - @classmethod - def generate_message_body(cls, repeat=1): - """Wrapper utility that sets the metadata of a queue.""" - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_body = {key: value} - - body = ([{'body': message_body, 'ttl': message_ttl}] * repeat) - rbody = {'messages': body} - return rbody - - -class BaseV2MessagingTest(BaseMessagingTest): - """Base class for the Messaging (Zaqar) v2 tests.""" - @classmethod - def setup_clients(cls): - super(BaseV2MessagingTest, cls).setup_clients() - cls.client = messaging_client.V2MessagingClient( - cls.os.auth_provider, - CONF.messaging.catalog_type, - CONF.identity.region, - build_interval=CONF.compute.build_interval, - build_timeout=CONF.compute.build_timeout) - - @classmethod - def purge_queue(cls, queue_name, resource=None): - resp, body = cls.client.purge_queue( - queue_name, resource) - return resp, body - - @classmethod - def create_subscription(cls, queue_name, rbody): - resp, body = cls.client.create_subscription( - queue_name, rbody) - return resp, body - - @classmethod - def delete_subscription(cls, queue_name, subscription_id): - resp, body = cls.client.delete_subscription(queue_name, - subscription_id) - return resp, body - - @classmethod - def list_subscription(cls, queue_name): - resp, body = cls.client.list_subscription(queue_name) - return resp, body - - @classmethod - def show_subscription(cls, queue_name, subscription_id): - resp, body = cls.client.show_subscription(queue_name, subscription_id) - return resp, body - - @classmethod - def update_subscription(cls, queue_name, subscription_id, rbody): - resp, body = cls.client.update_subscription(queue_name, - subscription_id, - rbody) - return resp, body - - @classmethod - def generate_subscription_body(cls): - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - option_body = 
{key: value} - subscribers = ['http://fake:8080', 'https://fake:8080', - 'mailto:fake@123.com'] - rbody = [{'options': option_body, 'ttl': message_ttl, - 'subscriber': subscriber} for subscriber in subscribers] - return rbody - - @classmethod - def generate_message_body(cls, repeat=1): - """Wrapper utility that sets the metadata of a queue.""" - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_body = {key: value} - - body = ([{'body': message_body, 'ttl': message_ttl}] * repeat) - rbody = {'messages': body} - return rbody diff --git a/zaqar/tests/tempest_plugin/tests/v1/__init__.py b/zaqar/tests/tempest_plugin/tests/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/tests/v1/test_claims.py b/zaqar/tests/tempest_plugin/tests/v1/test_claims.py deleted file mode 100644 index 6a3499b6..00000000 --- a/zaqar/tests/tempest_plugin/tests/v1/test_claims.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from six.moves.urllib import parse as urlparse -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators - -from zaqar.tests.tempest_plugin.tests import base - - -CONF = config.CONF - - -class TestClaims(base.BaseV1MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestClaims, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.create_queue(cls.queue_name) - - def _post_and_claim_messages(self, queue_name, repeat=1): - # Post Messages - message_body = self.generate_message_body(repeat=repeat) - self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - - # Post Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - resp, body = self.client.post_claims(queue_name=self.queue_name, - rbody=claim_body) - - return resp, body - - @decorators.idempotent_id('936cb1ca-b7af-44dd-a752-805e8c98156f') - def test_post_claim(self): - _, body = self._post_and_claim_messages(queue_name=self.queue_name) - claimed_message_uri = body[0]['href'] - - # Delete Claimed message - self.client.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('84e491f4-68c6-451f-9846-b8f868eb27c5') - def test_query_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - - # Query Claim - claim_uri = resp['location'][resp['location'].find('/v1'):] - self.client.query_claim(claim_uri) - - # Delete Claimed message - claimed_message_uri = body[0]['href'] - self.delete_messages(claimed_message_uri) - - 
@decorators.idempotent_id('420ef0c5-9bd6-4b82-b06d-d9da330fefd3') - def test_update_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - - claim_uri = resp['location'][resp['location'].find('/v1'):] - claimed_message_uri = body[0]['href'] - - # Update Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - update_rbody = {"ttl": claim_ttl} - - self.client.update_claim(claim_uri, rbody=update_rbody) - - # Verify claim ttl >= updated ttl value - _, body = self.client.query_claim(claim_uri) - updated_claim_ttl = body["ttl"] - self.assertGreaterEqual(claim_ttl, updated_claim_ttl) - - # Delete Claimed message - self.client.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('fd4c7921-cb3f-4ed8-9ac8-e8f1e74c44aa') - def test_release_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - claim_uri = resp['location'][resp['location'].find('/v1'):] - - # Release Claim - self.client.delete_claim(claim_uri) - - # Delete Claimed message - # This will implicitly verify that the claim is deleted. - message_uri = urlparse.urlparse(claim_uri).path - self.client.delete_messages(message_uri) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestClaims, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v1/test_messages.py b/zaqar/tests/tempest_plugin/tests/v1/test_messages.py deleted file mode 100644 index 27e6def4..00000000 --- a/zaqar/tests/tempest_plugin/tests/v1/test_messages.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators - -from zaqar.tests.tempest_plugin.tests import base - -CONF = config.CONF - - -class TestMessages(base.BaseV1MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestMessages, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.client.create_queue(cls.queue_name) - - def _post_messages(self, repeat=CONF.messaging.max_messages_per_page): - message_body = self.generate_message_body(repeat=repeat) - resp, body = self.post_messages(queue_name=self.queue_name, - rbody=message_body) - return resp, body - - @decorators.idempotent_id('93867172-a414-4eb3-a639-96e943c516b4') - def test_post_messages(self): - # Post Messages - resp, _ = self._post_messages() - - # Get on the posted messages - message_uri = resp['location'][resp['location'].find('/v1'):] - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). 
- self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('c967d59a-e919-41cb-994b-1c4300452c80') - def test_list_messages(self): - # Post Messages - self._post_messages() - - # List Messages - resp, _ = self.list_messages(queue_name=self.queue_name) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('2a68e3de-24df-47c3-9039-ec4156656bf8') - def test_get_message(self): - # Post Messages - _, body = self._post_messages() - message_uri = body['resources'][0] - - # Get posted message - resp, _ = self.client.show_single_message(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('c4b0a30b-efda-4b87-a395-0c43140df74d') - def test_get_multiple_messages(self): - # Post Messages - resp, _ = self._post_messages() - message_uri = resp['location'][resp['location'].find('/v1'):] - - # Get posted messages - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('fc0fca47-dd8b-4ecc-8522-d9c191f9bc9f') - def test_delete_single_message(self): - # Post Messages - _, body = self._post_messages() - message_uri = body['resources'][0] - - # Delete posted message & verify the delete operation - self.client.delete_messages(message_uri) - - message_uri = message_uri.replace('/messages/', '/messages?ids=') - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response has to be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('204', resp['status']) - - @decorators.idempotent_id('00cca069-5c8f-4b42-bff1-c577da2a4546') - def test_delete_multiple_messages(self): - # Post Messages - resp, _ = self._post_messages() - message_uri = resp['location'][resp['location'].find('/v1'):] - - # Delete multiple messages - self.client.delete_messages(message_uri) - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response has to be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('204', resp['status']) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestMessages, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v1/test_queues.py b/zaqar/tests/tempest_plugin/tests/v1/test_queues.py deleted file mode 100644 index afda1b35..00000000 --- a/zaqar/tests/tempest_plugin/tests/v1/test_queues.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- - -from six import moves -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc -from testtools import matchers - -from zaqar.tests.tempest_plugin.tests import base - - -class TestQueues(base.BaseV1MessagingTest): - - @decorators.idempotent_id('9f1c4c72-80c5-4dac-acf3-188cef42e36c') - def test_create_delete_queue(self): - # Create & Delete Queue - queue_name = data_utils.rand_name('test') - _, body = self.create_queue(queue_name) - - self.addCleanup(self.client.delete_queue, queue_name) - # NOTE(gmann): create_queue returns response status code as 201 - # so specifically checking the expected empty response body as - # this is not going to be checked in response_checker(). - self.assertEqual('', body) - - self.delete_queue(queue_name) - self.assertRaises(lib_exc.NotFound, - self.client.show_queue, - queue_name) - - -class TestManageQueue(base.BaseV1MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestManageQueue, cls).resource_setup() - cls.queues = list() - for _ in moves.xrange(5): - queue_name = data_utils.rand_name('Queues-Test') - cls.queues.append(queue_name) - # Create Queue - cls.client.create_queue(queue_name) - - @decorators.idempotent_id('ccd3d69e-f156-4c5f-8a12-b4f24bee44e1') - def test_check_queue_existence(self): - # Checking Queue Existence - for queue_name in self.queues: - self.check_queue_exists(queue_name) - - @decorators.idempotent_id('e27634d8-9c8f-47d8-a677-655c47658d3e') - def test_check_queue_head(self): - # Checking Queue Existence by calling HEAD - for queue_name in self.queues: - self.check_queue_exists_head(queue_name) - - @decorators.idempotent_id('0a0feeca-7768-4303-806d-82bbbb796ad3') - def test_list_queues(self): - # Listing queues - _, body = self.list_queues() - self.assertEqual(len(body['queues']), len(self.queues)) - for item in body['queues']: - self.assertIn(item['name'], self.queues) - - @decorators.idempotent_id('8fb66602-077d-49d6-ae1a-5f2091739178') - def test_get_queue_stats(self): - # Retrieve random queue - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - # Get Queue Stats for a newly created Queue - _, body = self.get_queue_stats(queue_name) - msgs = body['messages'] - for element in ('free', 'claimed', 'total'): - self.assertEqual(0, msgs[element]) - for element in ('oldest', 'newest'): - self.assertNotIn(element, msgs) - - @decorators.idempotent_id('0e2441e6-6593-4bdb-a3c0-20e66eeb3fff') - def test_set_and_get_queue_metadata(self): - # Retrieve random queue - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - # Check the Queue has no metadata - _, body = self.get_queue_metadata(queue_name) - self.assertThat(body, matchers.HasLength(0)) - # Create metadata - key3 = [0, 1, 2, 3, 4] - key2 = data_utils.rand_name('value') - req_body1 = dict() - req_body1[data_utils.rand_name('key3')] = key3 - req_body1[data_utils.rand_name('key2')] = key2 - req_body = dict() - req_body[data_utils.rand_name('key1')] = req_body1 - # Set Queue Metadata - self.set_queue_metadata(queue_name, req_body) - - # Get Queue Metadata - _, body = self.get_queue_metadata(queue_name) - self.assertThat(body, matchers.Equals(req_body)) - - @classmethod - def resource_cleanup(cls): - for queue_name in cls.queues: - cls.client.delete_queue(queue_name) - super(TestManageQueue, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v1_1/__init__.py b/zaqar/tests/tempest_plugin/tests/v1_1/__init__.py deleted file mode 
100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/tests/v1_1/test_claims.py b/zaqar/tests/tempest_plugin/tests/v1_1/test_claims.py deleted file mode 100644 index fde4fbc9..00000000 --- a/zaqar/tests/tempest_plugin/tests/v1_1/test_claims.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from six.moves.urllib import parse as urlparse -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators - -from zaqar.tests.tempest_plugin.tests import base - - -CONF = config.CONF - - -class TestClaims(base.BaseV11MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestClaims, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.create_queue(cls.queue_name) - - def _post_and_claim_messages(self, queue_name, repeat=1): - # Post Messages - message_body = self.generate_message_body(repeat=repeat) - self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - - # Post Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - resp, body = self.client.post_claims(queue_name=self.queue_name, - rbody=claim_body) - - return resp, body - - @decorators.idempotent_id('6fc4b79d-2366-4911-b0be-6446a1f02aea') - def test_post_claim(self): - _, body = self._post_and_claim_messages(queue_name=self.queue_name) - claimed_message_uri = body['messages'][0]['href'] - - # Delete Claimed message - self.client.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('c61829f9-104a-4860-a136-6af2a89f3eef') - def test_query_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - - # Query Claim - claim_uri = resp['location'][resp['location'].find('/v1.1'):] - self.client.query_claim(claim_uri) - - # Delete Claimed message - claimed_message_uri = body['messages'][0]['href'] - self.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('57b9d065-1995-420f-9173-4d716339e3b9') - def test_update_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - - claim_uri = resp['location'][resp['location'].find('/v1.1'):] - claimed_message_uri = body['messages'][0]['href'] - - # Update Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - update_rbody = {"ttl": claim_ttl} - - self.client.update_claim(claim_uri, rbody=update_rbody) - - # Verify claim ttl >= updated ttl value - _, body = self.client.query_claim(claim_uri) - updated_claim_ttl = body["ttl"] - self.assertGreaterEqual(claim_ttl, updated_claim_ttl) - - # Delete Claimed message - self.client.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('71081c25-3eb4-427a-b2f3-891d0c5f7d32') - def test_release_claim(self): - # Post 
a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - claim_uri = resp['location'][resp['location'].find('/v1.1'):] - - # Release Claim - self.client.delete_claim(claim_uri) - - # Delete Claimed message - # This will implicitly verify that the claim is deleted. - message_uri = urlparse.urlparse(claim_uri).path - self.client.delete_messages(message_uri) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestClaims, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v1_1/test_messages.py b/zaqar/tests/tempest_plugin/tests/v1_1/test_messages.py deleted file mode 100644 index 8d039d58..00000000 --- a/zaqar/tests/tempest_plugin/tests/v1_1/test_messages.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from zaqar.tests.tempest_plugin.tests import base - -CONF = config.CONF - - -class TestMessages(base.BaseV11MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestMessages, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.client.create_queue(cls.queue_name) - - def _post_messages(self, repeat=CONF.messaging.max_messages_per_page): - message_body = self.generate_message_body(repeat=repeat) - resp, body = self.post_messages(queue_name=self.queue_name, - rbody=message_body) - return resp, body - - @decorators.idempotent_id('7e506151-6870-404b-b746-801a72599418') - def test_post_messages(self): - # Post Messages - resp, _ = self._post_messages() - - # Get on the posted messages - message_uri = resp['location'][resp['location'].find('/v1.1'):] - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('d50ae94e-5f84-4e2d-bda4-48d8ab3ee3af') - def test_list_messages(self): - # Post Messages - self._post_messages() - - # List Messages - resp, _ = self.list_messages(queue_name=self.queue_name) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('a679d6be-f2ef-4649-b03c-710c72126b2f') - def test_get_message(self): - # Post Messages - _, body = self._post_messages() - message_uri = body['resources'][0] - - # Get posted message - resp, _ = self.client.show_single_message(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). 
- self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('889e7263-2d0c-4de1-aebd-d192157e347d') - def test_get_multiple_messages(self): - # Post Messages - resp, _ = self._post_messages() - message_uri = resp['location'][resp['location'].find('/v1.1'):] - - # Get posted messages - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('9a932955-933e-4283-86d0-85dd121c2edf') - def test_delete_single_message(self): - # Post Messages - _, body = self._post_messages() - message_uri = body['resources'][0] - - # Delete posted message & verify the delete operation - self.client.delete_messages(message_uri) - - message_uri = message_uri.replace('/messages/', '/messages?ids=') - # The test has an assertion here, because the response has to be 404 - # in this case (different from v1). - self.assertRaises(lib_exc.NotFound, - self.client.show_multiple_messages, - message_uri) - - @decorators.idempotent_id('ad1949a7-36c0-45be-8020-df91467d0bbb') - def test_delete_multiple_messages(self): - # Post Messages - resp, _ = self._post_messages() - message_uri = resp['location'][resp['location'].find('/v1.1'):] - - # Delete multiple messages - self.client.delete_messages(message_uri) - # The test has an assertion here, because the response has to be 404 - # in this case (different from v1). - self.assertRaises(lib_exc.NotFound, - self.client.show_multiple_messages, - message_uri) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestMessages, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v1_1/test_queues.py b/zaqar/tests/tempest_plugin/tests/v1_1/test_queues.py deleted file mode 100644 index 4cbfb17b..00000000 --- a/zaqar/tests/tempest_plugin/tests/v1_1/test_queues.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from six import moves -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from testtools import matchers - -from zaqar.tests.tempest_plugin.tests import base - - -class TestQueues(base.BaseV11MessagingTest): - - @decorators.idempotent_id('16a8a53e-e9f0-4c84-bc74-4e4e89abae75') - def test_create_delete_queue(self): - # Create & Delete Queue - queue_name = data_utils.rand_name('test') - _, body = self.create_queue(queue_name) - - self.addCleanup(self.client.delete_queue, queue_name) - # NOTE(gmann): create_queue returns response status code as 201 - # so specifically checking the expected empty response body as - # this is not going to be checked in response_checker().
- self.assertEqual('', body) - - self.delete_queue(queue_name) - # Queues are lazy in v1.1, so showing a deleted queue still succeeds - self.client.show_queue(queue_name) - - -class TestManageQueue(base.BaseV11MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestManageQueue, cls).resource_setup() - cls.queues = list() - for _ in moves.xrange(5): - queue_name = data_utils.rand_name('Queues-Test') - cls.queues.append(queue_name) - # Create Queue - cls.client.create_queue(queue_name) - - @decorators.idempotent_id('a27e9c2f-66ba-400e-b175-7b2e3f0f2ef9') - def test_list_queues(self): - # Listing queues - _, body = self.list_queues() - self.assertEqual(len(body['queues']), len(self.queues)) - for item in body['queues']: - self.assertIn(item['name'], self.queues) - - @decorators.idempotent_id('fe1a0655-08f9-4366-b1c6-b4bc4d30396b') - def test_get_queue_stats(self): - # Retrieve random queue - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - # Get Queue Stats for a newly created Queue - _, body = self.get_queue_stats(queue_name) - msgs = body['messages'] - for element in ('free', 'claimed', 'total'): - self.assertEqual(0, msgs[element]) - for element in ('oldest', 'newest'): - self.assertNotIn(element, msgs) - - @decorators.skip_because(bug='1543900') - @decorators.idempotent_id('883a5fba-fb87-4663-b941-cf4a25e64607') - def test_set_and_get_queue_metadata(self): - # Retrieve random queue - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - # Check the Queue has no metadata - _, body = self.get_queue_metadata(queue_name) - self.assertThat(body, matchers.HasLength(0)) - # Create metadata - key3 = [0, 1, 2, 3, 4] - key2 = data_utils.rand_name('value') - req_body1 = dict() - req_body1[data_utils.rand_name('key3')] = key3 - req_body1[data_utils.rand_name('key2')] = key2 - req_body = dict() - req_body[data_utils.rand_name('key1')] = req_body1 - # Set Queue Metadata - self.set_queue_metadata(queue_name, req_body) - - # Get Queue Metadata - _, body = self.get_queue_metadata(queue_name) - self.assertThat(body, matchers.Equals(req_body)) - - @classmethod - def resource_cleanup(cls): - for queue_name in cls.queues: - cls.client.delete_queue(queue_name) - super(TestManageQueue, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/__init__.py b/zaqar/tests/tempest_plugin/tests/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_claims.py b/zaqar/tests/tempest_plugin/tests/v2/test_claims.py deleted file mode 100644 index 1fb568b0..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_claims.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -from six.moves.urllib import parse as urlparse -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators - -from zaqar.tests.tempest_plugin.tests import base - - -CONF = config.CONF - - -class TestClaims(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestClaims, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.create_queue(cls.queue_name) - - def _post_and_claim_messages(self, queue_name, repeat=1): - # Post Messages - message_body = self.generate_message_body(repeat=repeat) - self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - - # Post Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - resp, body = self.client.post_claims(queue_name=self.queue_name, - rbody=claim_body) - - return resp, body - - @decorators.idempotent_id('3b839cac-d214-4fca-8c03-b8edbdcecb20') - def test_post_claim(self): - _, body = self._post_and_claim_messages(queue_name=self.queue_name) - claimed_message_uri = body['messages'][0]['href'] - - # Delete Claimed message - self.client.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('e69d047c-b3f4-4216-990e-7953407084b7') - def test_query_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - - # Query Claim - claim_uri = resp['location'][resp['location'].find('/v2'):] - self.client.query_claim(claim_uri) - - # Delete Claimed message - claimed_message_uri = body['messages'][0]['href'] - self.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('5e1e7559-77fc-4ea8-a817-cd43be23d692') - def test_update_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - - claim_uri = resp['location'][resp['location'].find('/v2'):] - claimed_message_uri = body['messages'][0]['href'] - - # Update Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - update_rbody = {"ttl": claim_ttl} - - self.client.update_claim(claim_uri, rbody=update_rbody) - - # Verify claim ttl >= updated ttl value - _, body = self.client.query_claim(claim_uri) - updated_claim_ttl = body["ttl"] - self.assertGreaterEqual(claim_ttl, updated_claim_ttl) - - # Delete Claimed message - self.client.delete_messages(claimed_message_uri) - - @decorators.idempotent_id('97c1ebcc-9d1e-463a-8673-6ec989ba3be7') - def test_release_claim(self): - # Post a Claim - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - claim_uri = resp['location'][resp['location'].find('/v2'):] - - # Release Claim - self.client.delete_claim(claim_uri) - - # Delete Claimed message - # This will implicitly verify that the claim is deleted. - message_uri = urlparse.urlparse(claim_uri).path - self.client.delete_messages(message_uri) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestClaims, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_claims_negative.py b/zaqar/tests/tempest_plugin/tests/v2/test_claims_negative.py deleted file mode 100644 index 6f51f6a4..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_claims_negative.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright (c) 2016 LARSEN & TOUBRO LIMITED. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import uuidutils - -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from zaqar.tests.tempest_plugin.tests import base - -CONF = config.CONF - - -class TestClaimsNegative(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestClaimsNegative, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.create_queue(cls.queue_name) - - def _post_and_claim_messages(self, queue_name, repeat=1): - # Post Messages - message_body = self.generate_message_body(repeat=repeat) - self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - - # Post Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - resp, body = self.client.post_claims(queue_name=self.queue_name, - rbody=claim_body) - return resp, body - - # Claim Messages - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('bd524990-7dff-4950-a82b-554ef1d644b6') - def test_request_claim_message_with_no_request_body(self): - # Claim a message with no request body - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - - claim_body = {} - resp, _ = self.client.post_claims(self.queue_name, - claim_body) - self.assertEqual('201', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('21de9b01-00a7-406a-a2e7-86ecfea2f21a') - def test_request_claim_message_with_invalid_character_request_body(self): - # Claim a message with invalid characters as request body - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - - claim_body = '[' - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, - claim_body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('5149cf66-0273-438c-b9de-f8c4af56f382') - def test_request_claim_message_with_invalid_request_body(self): - # Claim a message with invalid request body - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - - claim_body = '"Try"' - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, - claim_body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('9537b022-659e-4220-a05d-eabc10661772') - def test_request_claim_message_with_greater_value_for_limit(self): - # Claim messages with a greater limit value - message_body = self.generate_message_body(repeat=1) - self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, 
"grace": claim_grace} - params = {'limit': 200} - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, - claim_body, url_params=params) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('b9160f04-31f0-4246-b879-329b806a0d8a') - def test_request_claim_message_with_lesser_value_for_limit(self): - # Claim messages with an invalid lesser value - message_body = self.generate_message_body(repeat=1) - _, body = self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - params = {'limit': 0} - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, - claim_body, url_params=params) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('5dfa2fa4-ca17-46f3-9a28-8e70fbbd7f9e') - def test_request_claim_message_with_negative_value_for_limit(self): - # Claim messages with a negative value of limit - message_body = self.generate_message_body(repeat=1) - _, body = self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - - params = {'limit': -1} - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, - claim_body, url_params=params) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('eb8025bb-0f42-42fd-9905-6376bdc74cf4') - def test_request_claim_message_with_no_TTL_field(self): - # Claim a message with no TTL field - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"grace": claim_grace} - resp, _ = self.client.post_claims(self.queue_name, - claim_body) - self.assertEqual('201', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('6b99cab8-17f0-4ec5-bb6a-9ad490a0eb7a') - def test_request_claim_message_with_greater_invalid_TTL_value(self): - # TTL for a claim may not exceed 1209600 seconds, - # and must be at least 60 seconds long , configurable - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - claim_ttl = data_utils.rand_int_id(start=43201, - end=43500) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, claim_body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('3d65af6e-b104-40a6-a15c-1cf65358e687') - def test_request_claim_message_with_lesser_invalid_TTL_value(self): - # TTL for a claim may not exceed 1209600 seconds, - # and must be at least 60 seconds long , configurable - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - claim_ttl = data_utils.rand_int_id(start=-43500, - end=0) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, claim_body) - - 
@decorators.attr(type=['negative']) - @decorators.idempotent_id('86978d35-65be-44bb-aba4-0610728b5399') - def test_request_claim_message_with_no_grace_field(self): - # Grace for a claim may not exceed 1209600 seconds, - # and must be at least 60 seconds long , configurable - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_body = {"ttl": claim_ttl} - resp, _ = self.client.post_claims(self.queue_name, - claim_body) - self.assertEqual('201', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('812d9092-2d59-4dae-b67d-ce00da3f74f9') - def test_request_claim_message_with_invalid_greater_grace_value(self): - # Grace for a claim may not exceed 1209600 seconds, - # and must be at least 60 seconds long , configurable - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=43201, end=43501) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, claim_body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('bf10b08c-e254-49e4-a751-a0e128dce618') - def test_request_claim_message_with_invalid_lesser_grace_value(self): - # Grace for a claim may not exceed 1209600 seconds, - # and must be at least 60 seconds long , configurable - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=-43201, end=0) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, claim_body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('69b0d11a-40f5-4f35-847f-05f92ffadeb3') - def test_request_claim_message_with_non_JSON_request_body(self): - # Claim a messsage with an invalid JSON - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - claim_body = "123" - self.assertRaises(lib_exc.BadRequest, - self.client.post_claims, self.queue_name, claim_body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('d145ea04-203d-41f9-a893-f6e5716005b6') - def test_request_claim_message_with_invalid_url_params(self): - # Post Messages - message_body = self.generate_message_body(repeat=1) - _, body = self.client.post_messages(queue_name=self.queue_name, - rbody=message_body) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - params = {'Invalid': 'ImAnInvalidParam'} - resp, _ = self.client.post_claims(self.queue_name, - claim_body, url_params=params) - self.assertEqual('201', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('dbdf17ce-879f-4688-b71c-260cb9e4c4ab') - def test_claim_message_with_invalid_token(self): - # Claim a message without a valid token - body = self.generate_message_body() - self.client.post_messages(self.queue_name, body) - - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, 
end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.post_claims, self.queue_name, claim_body) - - # Query Claim - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('a1844a12-62d6-435e-906b-6b6ae538834f') - def test_query_from_a_nonexistent_queue(self): - # Query claim a non existent queue - non_existent_queue = data_utils.rand_name('rand_queuename') - non_existent_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/claims/{1}".format(non_existent_queue, - non_existent_id) - self.assertRaises(lib_exc.NotFound, - self.client.query_claim, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('a2af8e9b-08fb-4079-a77a-28c0390a614a') - def test_query_claim_with_non_existing_claim_id(self): - # Query claim using a non existing claim id - non_existent_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/claims/{1}".format(self.queue_name, - non_existent_id) - self.assertRaises(lib_exc.NotFound, - self.client.query_claim, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('a58c5214-68b9-47d6-a036-de73e7b2cdad') - def test_query_claim_with_invalid_token(self): - # Query claim with an invalid token - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - claim_uri = resp['location'][resp['location'].find('/v2'):] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.query_claim, claim_uri) - - # Update Claim - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('28915079-8b20-487d-ab01-64218572c543') - def test_update_claim_on_non_existing_queue(self): - # Update claim on a non existing queue - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - self.client.delete_queue(self.queue_name) - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - update_rbody = {"ttl": claim_ttl} - claim_uri = resp['location'][resp['location'].find('/v2'):] - self.assertRaises(lib_exc.NotFound, - self.client.update_claim, claim_uri, update_rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('732e9ca6-6e4f-4d66-9e78-200c3d6aca88') - def test_update_a_non_existing_claim(self): - # Update a non existing claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - update_rbody = {"ttl": claim_ttl} - claim_id = uuidutils.generate_uuid() - claim_uri = "/v2/queues/{0}/claims/{1}".format(self.queue_name, - claim_id) - self.assertRaises(lib_exc.NotFound, - self.client.update_claim, claim_uri, update_rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('925514e9-57f0-4209-a64e-8b0a72bb8f0f') - def test_update_claim_with_no_request_body(self): - # Update claim with no request body - resp, body = self._post_and_claim_messages(self.queue_name) - update_rbody = {} - claim_uri = resp['location'][resp['location'].find('/v2'):] - resp, body = self.client.update_claim(claim_uri, update_rbody) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('c17793da-112a-4e90-b2fd-a5acbfdcddc5') - def test_update_claim_with_invalid_json_in_request_body(self): - # Update claim with an invalid JSON - resp, body = self._post_and_claim_messages(self.queue_name) - update_rbody = {"123"} - claim_uri = 
resp['location'][resp['location'].find('/v2'):] - self.assertRaises(lib_exc.BadRequest, - self.client.update_claim, claim_uri, update_rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('1cd2fed7-6840-49cd-9b7a-1d80c01300fb') - def test_update_claim_with_invalid_token(self): - # Update claim without a valid token - resp, body = self._post_and_claim_messages(self.queue_name) - claim_uri = resp['location'][resp['location'].find('/v2'):] - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - update_rbody = {"ttl": claim_ttl} - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.update_claim, claim_uri, update_rbody) - - # Release Claim - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('b61a0d09-bc47-4b33-aa6d-7f20cbbe9bd2') - def test_release_claim_from_a_non_existing_queue(self): - # Release claim from a non existing queue - non_existent_queue = data_utils.rand_name('rand_queuename') - non_existent_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/claims/{1}".format(non_existent_queue, - non_existent_id) - resp, body = self.client.delete_claim(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('20a6e6ed-0f53-484d-aa78-717cdaa25e50') - def test_release_a_nonexisting_claim_id(self): - # Release a non existing claim - non_existent_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/claims/{1}".format(self.queue_name, - non_existent_id) - resp, body = self.client.delete_claim(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('082d50ca-bd3e-4d66-a92b-6ff917ab3b21') - def test_release_claim_with_invalid_token(self): - # Release claim without a valid token - resp, body = self._post_and_claim_messages(queue_name=self.queue_name) - claim_uri = resp['location'][resp['location'].find('/v2'):] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.delete_claim, claim_uri) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestClaimsNegative, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_messages.py b/zaqar/tests/tempest_plugin/tests/v2/test_messages.py deleted file mode 100644 index fe322e32..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_messages.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from zaqar.tests.tempest_plugin.tests import base - -CONF = config.CONF - - -class TestMessages(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestMessages, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.client.create_queue(cls.queue_name) - - def _post_messages(self, repeat=CONF.messaging.max_messages_per_page): - message_body = self.generate_message_body(repeat=repeat) - resp, body = self.post_messages(queue_name=self.queue_name, - rbody=message_body) - return resp, body - - @decorators.idempotent_id('2e1a26c1-6d7b-4ae7-b510-d8e2abe9d831') - def test_post_messages(self): - # Post Messages - resp, _ = self._post_messages() - - # Get on the posted messages - message_uri = resp['location'][resp['location'].find('/v2'):] - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('f38cca63-55d7-41e7-8ff9-7254e9859aa7') - def test_list_messages(self): - # Post Messages - self._post_messages() - - # List Messages - resp, _ = self.list_messages(queue_name=self.queue_name) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('f9719398-8bb7-4660-acb6-ef87c47d726c') - def test_get_message(self): - # Post Messages - _, body = self._post_messages() - message_uri = body['resources'][0] - - # Get posted message - resp, _ = self.client.show_single_message(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('a8fe2c0f-c3f2-4278-8bd9-2fca94356f2e') - def test_get_multiple_messages(self): - # Post Messages - resp, _ = self._post_messages() - message_uri = resp['location'][resp['location'].find('/v2'):] - - # Get posted messages - resp, _ = self.client.show_multiple_messages(message_uri) - # The test has an assertion here, because the response cannot be 204 - # in this case (the client allows 200 or 204 for this API call). - self.assertEqual('200', resp['status']) - - @decorators.idempotent_id('9654fb55-8cbd-4997-8c3e-8d388276a8d9') - def test_delete_single_message(self): - # Post Messages - _, body = self._post_messages() - message_uri = body['resources'][0] - - # Delete posted message & verify the delete operation - self.client.delete_messages(message_uri) - - message_uri = message_uri.replace('/messages/', '/messages?ids=') - # The test has an assertion here, because the response has to be 404 - # in this case (different from v1). - self.assertRaises(lib_exc.NotFound, - self.client.show_multiple_messages, - message_uri) - - @decorators.idempotent_id('e025555a-fa3f-4558-859a-42d69ccf66a6') - def test_delete_multiple_messages(self): - # Post Messages - resp, _ = self._post_messages() - message_uri = resp['location'][resp['location'].find('/v2'):] - - # Delete multiple messages - self.client.delete_messages(message_uri) - # The test has an assertion here, because the response has to be 404 - # in this case (different from v1).
- self.assertRaises(lib_exc.NotFound, - self.client.show_multiple_messages, - message_uri) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestMessages, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_messages_negative.py b/zaqar/tests/tempest_plugin/tests/v2/test_messages_negative.py deleted file mode 100644 index a40aa01c..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_messages_negative.py +++ /dev/null @@ -1,646 +0,0 @@ -# Copyright (c) 2016 LARSEN & TOUBRO LIMITED. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import random - -from oslo_utils import uuidutils -from six import moves -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from zaqar.tests.tempest_plugin.tests import base - -CONF = config.CONF - - -class TestMessagesNegative(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestMessagesNegative, cls).resource_setup() - cls.queues = list() - for _ in moves.xrange(1): - queue_name = data_utils.rand_name('Queues-Test') - cls.queues.append(queue_name) - # Create Queue - cls.client.create_queue(queue_name) - - # Get specific Message - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('8246ee51-651c-4e2a-9a07-91848ca5e1e4') - def test_request_single_message_from_a_nonexistent_queue(self): - # List a message from a nonexistent queue - id = uuidutils.generate_uuid() - non_existent_queue = data_utils.rand_name('rand_queuename') - uri = "/v2/queues/{0}/messages/{1}".format(non_existent_queue, id) - self.assertRaises(lib_exc.NotFound, - self.client.show_single_message, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('767fdad1-37df-485a-8063-5036e8d16a12') - def test_request_a_non_existing_message(self): - # List a message with an invalid id - invalid_id = uuidutils.generate_uuid() - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - uri = "/v2/queues/{0}/messages/{1}".format(queue_name, invalid_id) - self.assertRaises(lib_exc.NotFound, - self.client.show_single_message, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('ac2d1a88-5721-4bef-8dfa-53d936630e84') - def test_request_a_message_with_negative_message_id(self): - # List a message with an invalid id, negative - negative_id = '-1' - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - uri = "/v2/queues/{0}/messages?ids={1}".format(queue_name, - negative_id) - self.assertRaises(lib_exc.NotFound, - self.client.show_single_message, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('ac083d78-67bb-4515-b553-2fc76499e2bd') - def test_request_a_message_without_a_token(self): - # List a message without a valid token - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - id = uuidutils.generate_uuid() - uri = 
"/v2/queues/{0}/messages/{1}".format(queue_name, id) - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.show_single_message, uri) - - # Get a Set of Messages by ID - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('f544e745-f3da-451d-8621-c3711cd37453') - def test_request_multiple_messages_from_a_nonexistent_queue(self): - # List multiple messages from a non existent queue - id1 = uuidutils.generate_uuid() - id2 = uuidutils.generate_uuid() - queue = data_utils.rand_name('nonexistent_queue') - uri = "/v2/queues/{0}/messages?ids={1},{2}".format(queue, - id1, id2) - self.assertRaises(lib_exc.NotFound, - self.client.show_multiple_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('654e64f8-01df-40a0-a09e-d5ec17a3e187') - def test_request_multiple_messages_with_invalid_message_id(self): - # List multiple messages by passing invalid id - invalid_id = uuidutils.generate_uuid() - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - uri = "/v2/queues/{0}/messages?ids={1},{2}".format(queue_name, - invalid_id, - invalid_id) - self.assertRaises(lib_exc.NotFound, - self.client.show_multiple_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('295a37a6-5c93-43e3-a316-3f3dffd4b242') - def test_request_multiple_messages_by_exceeding_the_default_limit(self): - # Default limit value is 20 , configurable - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - ids = str.join(',', (uuidutils.generate_uuid()) * 21) - uri = "/v2/queues/{0}/messages?ids={1}".format(queue_name, ids) - self.assertRaises(lib_exc.BadRequest, - self.client.show_multiple_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('f96eb4a0-8930-4d5e-b8bf-11080628c761') - def test_request_message_by_passing_invalid_echo_param(self): - # Value of the echo parameter must be either true or false - echo = None - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - uri = "/v2/queues/{0}/messages?echo={1}".format(queue_name, echo) - self.assertRaises(lib_exc.BadRequest, - self.client.show_multiple_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('6f668242-6a45-48bc-8ef2-fb581e57d471') - def test_request_messages_by_passing_invalid_include_claimed_param(self): - # Value of include_claimed param must be either true or false - value = None - queue = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - uri = "/v2/queues/{0}/messages?include_claimed={1}".format(queue, - value) - self.assertRaises(lib_exc.BadRequest, - self.client.show_multiple_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('dd267387-76f6-47bd-849b-b1640051aff4') - def test_request_messages_limit_greater_than_configured_value(self): - # Default limit value is 20 , configurable - invalid_limit = data_utils.rand_int_id(21, 10000) - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - uri = "/v2/queues/{0}/messages?limit={1}".format(queue_name, - invalid_limit) - self.assertRaises(lib_exc.BadRequest, - self.client.show_multiple_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('d199f64e-0f22-4129-9bc4-ff709c01592b') - def test_request_messages_with_limit_less_than_configured_value(self): - # Default limit value is 20 , configurable - invalid_limit = 
data_utils.rand_int_id(-1000, 0) - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - uri = "/v2/queues/{0}/messages?limit={1}".format(queue_name, - invalid_limit) - self.assertRaises(lib_exc.BadRequest, - self.client.show_multiple_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('0b2e803c-7cb9-4c11-bed6-f976f5247b27') - def test_request_multiple_messages_request_without_a_token(self): - # List messages without a valid token - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - id1 = uuidutils.generate_uuid() - id2 = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages/{1},{2}".format(queue_name, id1, id2) - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.show_multiple_messages, uri) - - # Get Messages - - @decorators.idempotent_id('125632c4-c7ce-47fb-93fe-c446d14396f9') - def test_list_messages_with_invalid_token(self): - # List messages without a valid token - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.list_messages, queue_name) - - # Post Messages - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('5a0ba3e6-e6ca-4952-be50-fb6be7834ab7') - def test_post_messages_with_no_request_body(self): - # Post message with empty body - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - body = {} - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('af5ffb4d-c0b4-41db-aea3-bcfc8a232bd6') - def test_post_messages_with_a_bad_message(self): - # Post message with invalid message format - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - body = {'[]', '.'} - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('10bc153c-97d2-4a19-9795-e0f6993bad4f') - def test_post_messages_to_a_nonexistent_queue(self): - # Post message to a non existent queue - non_existent_queue = data_utils.rand_name('rand_queuename') - body = self.generate_message_body() - resp, _ = self.client.post_messages(non_existent_queue, body) - self.assertEqual('201', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('263d6361-4759-4f2c-be9c-12559f064135') - def test_post_messages_to_a_non_ascii_queue(self): - # Post message to a queue with non ascii queue name - queue_name = data_utils.rand_name('\u6c49\u5b57\u6f22\u5b57') - body = self.generate_message_body() - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('04c1b220-1e22-4e38-9db2-a76e8b5e2f3f') - def test_post_messages_to_a_queue_with_invalid_name(self): - # Post messages to a queue with invalid characters for queue name - queue_name = '@$@^qw@' - body = self.generate_message_body() - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('72290766-cb01-425e-856b-a57877015336') - def test_post_messages_to_a_queue_with_invalid_length_for_queue_name(self): - # Post messages to a queue with a long queue name - queue_name 
= 'q' * 65 - body = self.generate_message_body() - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('774e8bc8-9b20-40fb-9eed-c5368de368c5') - def test_post_messages_with_invalid_json_request_body(self): - # Post messages to a queue with non-JSON request body - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - body = "123" - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, body) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('ebbe257a-9f1e-498a-bba8-f5c71230365a') - def test_post_messages_with_TTL_less_than_60(self): - # TTL for a message may not exceed 1209600 seconds, - # and must be at least 60 seconds long. - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_ttl = data_utils.\ - rand_int_id(start=0, end=60) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_body = {key: value} - - rbody = ([{'body': message_body, 'ttl': message_ttl}] * 1) - - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('6d64de03-fd57-4f07-b6f1-8563200a4b4d') - def test_post_messages_with_TTL_greater_than_1209600(self): - # TTL for a message may not exceed 1209600 seconds, and - # must be at least 60 seconds long. - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_ttl = data_utils.\ - rand_int_id(start=1209601, end=1309600) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_body = {key: value} - - rbody = ([{'body': message_body, 'ttl': message_ttl}] * 1) - - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('c48802d7-7e91-4d5f-9c23-32cd4edc41ff') - def test_post_messages_with_non_int_value_of_TTL(self): - # TTL for a message may not exceed 1209600 seconds, and - # must be at least 60 seconds long. - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_ttl = random.uniform(0.0, 0.120960) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_body = {key: value} - - rbody = ([{'body': message_body, 'ttl': message_ttl}] * 1) - - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('203fed96-0df3-43c0-9956-723b34b8a23b') - def test_post_messages_with_negative_value_of_TTL(self): - # TTL for a message may not exceed 1209600 seconds, and - # must be at least 60 seconds long. 
- queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_ttl = data_utils.\ - rand_int_id(start=-9999, end=-1) - - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_body = {key: value} - - rbody = ([{'body': message_body, 'ttl': message_ttl}] * 1) - - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('d3ad28e7-0c84-43cf-bb87-1574da28a10d') - def test_post_messages_without_TTL(self): - # TTL for a message may not exceed 1209600 seconds, and - # must be at least 60 seconds long. - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_body = {key: value} - - rbody = ([{'body': message_body}] * 1) - - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('662428d4-302f-4000-8ac6-1a53fb8818b8') - def test_post_messages_exceeding_message_post_size(self): - # Post messages with greater message size - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = 'a' * 1024 - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - - message_body = {key: value} - - rbody = ([{'body': message_body, 'ttl': message_ttl}] * 1) - - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('ba4f7334-1a4d-4bc8-acd3-040a1310fe62') - def test_post_messages_with_invalid_body_size(self): - # Maximum number of queue message per page - # while posting messages is 20 - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - - message_body = {key: value} - rbody = ([{'body': message_body, 'ttl': message_ttl}] * 21) - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('855d36a2-e583-4355-af33-fcec0f71842c') - def test_post_messages_without_body_in_request_body(self): - # TTL for a message may not exceed 1209600 seconds, and - # must be at least 60 seconds long. 
- queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - - rbody = ([{'ttl': message_ttl}] * 1) - - self.assertRaises(lib_exc.BadRequest, - self.client.post_messages, queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('074fe312-0077-41ba-8aa9-e6d6a586a685') - def test_post_messages_with_invalid_auth_token(self): - # X-Auth-Token is not provided - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - body = self.generate_message_body() - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None) - self.assertRaises(lib_exc.Unauthorized, - self.client.post_messages, - queue_name, body) - - # Delete Messages - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('8552d5b3-7c16-4eaf-a8de-a7b178823458') - def test_delete_message_from_a_nonexistent_queue(self): - # Delete is an idempotent operation - non_existent_queue = data_utils.rand_name('rand_queuename') - message_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages?ids={1}".format(non_existent_queue, - message_id) - resp, _ = self.client.delete_messages(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('a5d581f0-0403-4c2d-9ea4-048cc6cc85f0') - def test_delete_a_non_existing_message(self): - # Delete is an idempotent operation - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages?ids={1}".format(queue_name, - message_id) - resp, _ = self.client.delete_messages(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('f792f462-0ad9-41b1-9bae-636957364ca0') - def test_delete_message_with_non_existent_message_id(self): - # Delete is an idempotent operation - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages/{1}".format(queue_name, - message_id) - resp, _ = self.client.delete_messages(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('6b8f14b3-2307-49e2-aa53-75d4d4b82754') - def test_delete_multiple_non_existing_messages(self): - # Delete is an idempotent operation - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - id1 = uuidutils.generate_uuid() - id2 = uuidutils.generate_uuid() - id3 = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages?ids={1}{2}{3}".format(queue_name, - id1, id2, id3) - resp, _ = self.client.delete_messages(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('805f75fd-6447-4c8a-860c-2659d8a5b0b5') - def test_delete_message_without_id(self): - # Delete all the message from a queue - # without passing any id - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_body = self.generate_message_body(repeat=1) - self.post_messages(queue_name, message_body) - uri = "/v2/queues/{0}/messages".format(queue_name) - self.assertRaises(lib_exc.BadRequest, - self.client.delete_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('85eed2fb-fa72-4886-8cfc-44c7fb58ffea') - def test_delete_message_with_invalid_message_id(self): - # Delete is an idempotent operation - # Delete a 
message with negative id - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages?ids=-{1}".format(queue_name, - message_id) - resp, _ = self.client.delete_messages(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('374265e7-1146-4da4-a265-38c8698e4144') - def test_delete_the_deleted_message(self): - # Delete is an idempotent operation - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_id = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages?ids={1}".format(queue_name, - message_id) - resp, _ = self.client.delete_messages(uri) - # Delete the message again - resp, _ = self.client.delete_messages(uri) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('a130d499-cd41-42dd-b1f0-e859f73b00e0') - def test_delete_multiple_messages_by_exceeding_the_default_limit(self): - # Default limit value is 20 - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - ids = str.join(',', (uuidutils.generate_uuid()) * 21) - uri = "/v2/queues/{0}/messages?ids={1}".format(queue_name, ids) - self.assertRaises(lib_exc.BadRequest, - self.client.delete_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('51a2f5ca-e358-4ef6-9f33-73d3e01f07b9') - def test_delete_message_without_providing_claim_id(self): - # When message is claimed; - # it cannot be deleted without a valid claim ID. - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - # Post Messages - message_body = self.generate_message_body(repeat=1) - self.client.post_messages(queue_name=queue_name, - rbody=message_body) - # Post Claim - claim_ttl = data_utils.rand_int_id(start=60, - end=CONF.messaging.max_claim_ttl) - claim_grace = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_claim_grace) - claim_body = {"ttl": claim_ttl, "grace": claim_grace} - resp, body = self.client.post_claims(queue_name=queue_name, - rbody=claim_body) - message_uri = body['messages'][0]['href'] - sep = "?claim_id" - uri = message_uri.split(sep, 1)[0] - self.assertRaises(lib_exc.Forbidden, - self.client.delete_messages, - uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('18fa5f43-20e6-47bd-a751-ef33e62a4315') - def test_delete_message_with_invalid_claim_id(self): - # Delete with a non existent claim id - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_body = self.generate_message_body(repeat=1) - resp, body = self.post_messages(queue_name, message_body) - message_uri = body['resources'][0] - claim_id = "?claim_id=123" - uri = message_uri + str(claim_id) - self.assertRaises(lib_exc.BadRequest, - self.client.delete_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('b82e5dee-5470-4408-9dca-d4a7536ff25f') - def test_delete_message_with_no_pop_value(self): - # Pop value must be at least 1 and may not be greater than 20 - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - value = ' ' - uri = "/v2/queues/{0}/messages?pop={1}".format(queue_name, value) - self.assertRaises(lib_exc.BadRequest, - self.client.delete_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('6454103d-9cfd-48da-bd8c-061e61a7e634') - def test_delete_message_with_invalid_pop_value(self): - # Pop value must be at least 1 and may 
not be greater than 20 - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - value = 1000000000 - uri = "/v2/queues/{0}/messages?pop={1}".format(queue_name, value) - self.assertRaises(lib_exc.BadRequest, - self.client.delete_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('9874b696-352b-47d7-a338-d149d4096c28') - def test_delete_message_with_negative_pop_value(self): - # Pop value must be at least 1 and may not be greater than 20 - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - value = '-1' - uri = "/v2/queues/{0}/messages?pop={1}".format(queue_name, value) - self.assertRaises(lib_exc.BadRequest, - self.client.delete_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('4044f38a-0a70-4c86-ab1b-ca369e5b443a') - def test_delete_message_with_invalid_params_with_pop(self): - # Pop & ids parameters are mutually exclusive - # Anyone of which needs to be used with delete - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - pop_value = 5 - ids_value = uuidutils.generate_uuid() - uri = "/v2/queues/{0}/messages?pop={1}&ids={2}".format(queue_name, - pop_value, - ids_value) - self.assertRaises(lib_exc.BadRequest, - self.client.delete_messages, uri) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('ea609ee5-a7a2-41a0-a9fb-73e8c7ed8c59') - def test_delete_messages_with_invalid_auth_token(self): - # Delete message with an invalid token - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - message_body = self.generate_message_body(repeat=1) - resp, body = self.post_messages(queue_name, message_body) - message_uri = body['resources'][0] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None) - self.assertRaises(lib_exc.Unauthorized, - self.client.delete_messages, - message_uri) - - @classmethod - def resource_cleanup(cls): - for queue_name in cls.queues: - cls.client.delete_queue(queue_name) - super(TestMessagesNegative, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_queues.py b/zaqar/tests/tempest_plugin/tests/v2/test_queues.py deleted file mode 100644 index 492f441f..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_queues.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from six import moves -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from testtools import matchers - -from zaqar.tests.tempest_plugin.tests import base - -CONF = config.CONF - - -class TestQueues(base.BaseV2MessagingTest): - - @decorators.idempotent_id('f2db96f3-fa02-426a-9b42-5806e12f14d4') - def test_create_delete_queue(self): - # Create & Delete Queue - queue_name = data_utils.rand_name('test') - _, body = self.create_queue(queue_name) - - self.addCleanup(self.client.delete_queue, queue_name) - # NOTE(gmann): create_queue returns response status code as 201 - # so specifically checking the expected empty response body as - # this is not going to be checked in response_checker(). - self.assertEqual('', body) - - self.delete_queue(queue_name) - # lazy queue - self.client.show_queue(queue_name) - - -class TestManageQueue(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestManageQueue, cls).resource_setup() - cls.queues = list() - for _ in moves.xrange(5): - queue_name = data_utils.rand_name('Queues-Test') - cls.queues.append(queue_name) - # Create Queue - cls.client.create_queue(queue_name) - - def _post_messages(self, repeat=CONF.messaging.max_messages_per_page, - queue_name=None): - message_body = self.generate_message_body(repeat=repeat) - resp, body = self.post_messages(queue_name=queue_name, - rbody=message_body) - return resp, body - - def _create_subscriptions(self, queue_name): - bodys = self.generate_subscription_body() - results = [] - for body in bodys: - resp, body = self.create_subscription(queue_name=queue_name, - rbody=body) - results.append((resp, body)) - return results - - @decorators.idempotent_id('8f1fec00-54fc-48b9-aa67-c10a824b768d') - def test_list_queues(self): - # Listing queues - _, body = self.list_queues() - self.assertEqual(len(body['queues']), len(self.queues)) - for item in body['queues']: - self.assertIn(item['name'], self.queues) - - @decorators.idempotent_id('e96466e7-d43f-48f9-bfe8-59e3d40f6868') - def test_get_queue_stats(self): - # Retrieve random queue - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - # Get Queue Stats for a newly created Queue - _, body = self.get_queue_stats(queue_name) - msgs = body['messages'] - for element in ('free', 'claimed', 'total'): - self.assertEqual(0, msgs[element]) - for element in ('oldest', 'newest'): - self.assertNotIn(element, msgs) - - @decorators.skip_because(bug='1543900') - @decorators.idempotent_id('dfb1e0b0-b481-4e2a-91ae-2c28b65e9c28') - def test_set_and_get_queue_metadata(self): - # Retrieve random queue - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - # Check the Queue has no metadata - _, body = self.get_queue_metadata(queue_name) - self.assertThat(body, matchers.HasLength(0)) - # Create metadata - key3 = [0, 1, 2, 3, 4] - key2 = data_utils.rand_name('value') - req_body1 = dict() - req_body1[data_utils.rand_name('key3')] = key3 - req_body1[data_utils.rand_name('key2')] = key2 - req_body = dict() - req_body[data_utils.rand_name('key1')] = req_body1 - # Set Queue Metadata - self.set_queue_metadata(queue_name, req_body) - - # Get Queue Metadata - _, body = self.get_queue_metadata(queue_name) - self.assertThat(body, matchers.Equals(req_body)) - - @decorators.idempotent_id('2fb6e5a8-c18f-4407-9ee7-7a13c8e09f69') - def test_purge_queue(self): - queue_name = self.queues[0] - # The queue contains no messages and subscriptions by default. 
- resp, body = self.list_messages(queue_name=queue_name) - self.assertEqual([], body['messages']) - resp, body = self.list_subscription(queue_name) - self.assertEqual([], body['subscriptions']) - # Post some messages and create some subscriptions for the queue. - self._post_messages(queue_name=queue_name) - self._create_subscriptions(queue_name=queue_name) - # The queue contains messages and subscriptions now. - resp, body = self.list_messages(queue_name=queue_name) - self.assertIsNotNone(len(body['messages'])) - resp, body = self.list_subscription(queue_name) - self.assertIsNotNone(len(body['subscriptions'])) - # Purge the queue - resp, body = self.purge_queue(queue_name) - self.assertEqual(204, resp.status) - # The queue contains nothing. - resp, body = self.list_messages(queue_name=queue_name) - self.assertEqual([], body['messages']) - resp, body = self.list_subscription(queue_name) - self.assertEqual([], body['subscriptions']) - - @classmethod - def resource_cleanup(cls): - for queue_name in cls.queues: - cls.client.delete_queue(queue_name) - super(TestManageQueue, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_queues_negative.py b/zaqar/tests/tempest_plugin/tests/v2/test_queues_negative.py deleted file mode 100644 index 65a43e21..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_queues_negative.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) 2016 LARSEN & TOUBRO LIMITED. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from six import moves -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from zaqar.tests.tempest_plugin.tests import base - - -class QueueNegativeTestJSON(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(QueueNegativeTestJSON, cls).resource_setup() - cls.queues = list() - for _ in moves.xrange(1): - queue_name = data_utils.rand_name('Queues-Test') - cls.queues.append(queue_name) - cls.client.create_queue(queue_name) - - # Create Queues - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('77634fd0-0a25-4cc7-a01c-b6d16304f907') - def test_queue_has_a_long_name(self): - # Length of queue name should >= 1 and <=64 bytes - queue_name = 'q' * 65 - self.assertRaises(lib_exc.BadRequest, - self.client.create_queue, - queue_name) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('639206ad-d74c-4f51-895d-76e2c7dff60b') - def test_queue_name_is_not_specified(self): - # Length of queue name should >= 1 and <=64 bytes - queue_name = ' ' - self.assertRaises(lib_exc.UnexpectedResponseCode, - self.client.create_queue, - queue_name) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('3ca0e180-c770-4922-8a48-9563c484aaed') - def test_queue_name_has_a_invalid_character_set(self): - # Invalid name with characters - queue_name = '@$@^qw@' - self.assertRaises(lib_exc.BadRequest, - self.client.create_queue, - queue_name) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('533c5a65-fcc9-4e07-84bc-82ac0c007dbc') - def test_queue_name_with_non_ASCII_characters(self): - # Invalid name with non-ASCII characters - queue_name = data_utils.rand_name('\u6c49\u5b57\u6f22\u5b57') - self.assertRaises(lib_exc.BadRequest, - self.client.create_queue, - queue_name) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('44775212-2b79-40c7-8604-fcf01eddba79') - def test_queue_name_with_numeric_values(self): - # Numeric values for queue name - queue_name = data_utils.rand_int_id() - resp, _ = self.client.create_queue(queue_name) - self.assertEqual('201', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('2ce4f4c1-cbaa-4c2d-b28a-f562aec037aa') - def test_create_queue_with_invalid_auth_token(self): - # Create queue with empty headers - # X-Auth-Token is not provided - queue_name = data_utils.rand_name(name='queue') - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.create_queue, - queue_name) - - # List Queues - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('d4d33596-0f06-4911-aecc-17512c00a301') - def test_request_a_nonexistent_queue(self): - # List a non-existent queue - nonexistent_queuename = data_utils.rand_name('rand_queuename') - resp, _ = self.client.show_queue(nonexistent_queuename) - self.assertEqual('200', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('0c8122a8-e28b-4320-8f1f-af97a0bfa26b') - def test_request_after_deleting_queue(self): - # Request queue after deleting the queue - # DELETE is an idempotent operation - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - self.client.delete_queue(queue_name) - resp, _ = self.client.show_queue(queue_name) - self.assertEqual('200', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('b7c4521a-d0f1-4fc6-b99d-ece2131ac082') - def 
test_request_with_a_greater_limit_value(self): - # Limit for listing queues is 20 , configurable - params = {'limit': '200'} - self.assertRaises(lib_exc.BadRequest, - self.client.list_queues, - url_params=params) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('121e5171-e189-4be5-8ccf-d0b2009b3bbe') - def test_request_with_zero_limit_value(self): - # Limit for listing queues is 20 , configurable - params = {'limit': '0'} - self.assertRaises(lib_exc.BadRequest, - self.client.list_queues, - url_params=params) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('6c710fa6-9447-4c2c-b8c0-7581a56b4ab5') - def test_request_with_negative_limit_value(self): - # Limit for listing queues is 20 , configurable - params = {'limit': '-1'} - self.assertRaises(lib_exc.BadRequest, - self.client.list_queues, - url_params=params) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('4a54b60c-0a6a-4662-9ba1-fe0b9dd4f399') - def test_with_non_boolean_value_for_detailed(self): - # Value for detailed parameter should be true or false - params = {'detailed': 'None'} - self.assertRaises(lib_exc.BadRequest, - self.client.list_queues, url_params=params) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('f66f1225-bfe8-4fe0-b8c9-35e4342e0f0e') - def test_list_queues_with_invalid_auth_token(self): - # List queue with empty headers - # X-Auth-Token is not provided - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.list_queues) - - # Get Queue Stats - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('16cec0df-b58a-44e8-9132-f99f0c1da29a') - def test_request_stats_for_a_non_existing_queue(self): - # Show stats for a non-existent queue - nonexistent_queuename = data_utils.rand_name('rand_queuename') - resp, _ = self.client.show_queue_stats(nonexistent_queuename) - self.assertEqual('200', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('1cad4984-3f66-48f6-82c9-9a544be78ca6') - def test_request_queue_stats_after_deleting_queue(self): - # List queue stats after deleting the queue - # DELETE is an idempotent operation - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - self.client.delete_queue(queue_name) - resp, _ = self.client.show_queue_stats(queue_name) - self.assertEqual('200', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('2b1aeba8-a314-495b-8d45-84692354a013') - def test_request_queue_stats_with_invalid_auth_token(self): - # Get queue stats with empty headers - # X-Auth-Token is not provided - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.show_queue_stats, - queue_name) - - # Delete Queues - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('cf7d5cff-0e4f-4d2c-82eb-59f450ca1b7d') - def test_delete_a_non_existing_queue(self): - # Delete is an idempotent operation - non_existent_queue = data_utils.rand_name('Queue_name') - resp, _ = self.client.delete_queue(non_existent_queue) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('c5973d87-5b59-446c-8e81-a8e28de9e61d') - def test_delete_the_deleted_queue(self): - # Delete is an idempotent operation - queue_name = 
self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - self.client.delete_queue(queue_name) - # Delete again - resp, _ = self.client.delete_queue(queue_name) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('a54e2715-478a-4701-9080-a06b9364dc74') - def test_delete_queue_with_invalid_auth_token(self): - # Delete queue with empty headers - # X-Auth-Token is not provided - queue_name = self.queues[data_utils.rand_int_id(0, - len(self.queues) - 1)] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.delete_queue, - queue_name) - - @classmethod - def resource_cleanup(cls): - for queue_name in cls.queues: - cls.client.delete_queue(queue_name) - super(QueueNegativeTestJSON, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_subscriptions.py b/zaqar/tests/tempest_plugin/tests/v2/test_subscriptions.py deleted file mode 100644 index 4d6f4f7e..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_subscriptions.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) 2016 HuaWei, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json - -from oslo_utils import uuidutils -from tempest.lib.common.utils import data_utils -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators - -from zaqar.tests.tempest_plugin.tests import base - - -class TestSubscriptions(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestSubscriptions, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.client.create_queue(cls.queue_name) - - def _create_subscriptions(self): - bodys = self.generate_subscription_body() - results = [] - for body in bodys: - resp, body = self.create_subscription(queue_name=self.queue_name, - rbody=body) - results.append((resp, body)) - return results - - @decorators.idempotent_id('425d5afb-31d8-40ea-a23a-ef3f5554f7cc') - def test_create_delete_subscriptions(self): - # create all kinds of subscriptions - results = self._create_subscriptions() - # delete them - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.idempotent_id('a8776d93-895f-4947-a6b0-d0da50bfd5e8') - def test_list_subscriptions(self): - # create all kinds of subscriptions - results = self._create_subscriptions() - # list them - resp, body = self.list_subscription(self.queue_name) - self.assertEqual('200', resp['status']) - self.assertEqual(3, len(body['subscriptions'])) - # delete them - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.idempotent_id('de3d4a35-c5de-4f40-b6ad-7df187bf3831') - def test_show_subscriptions(self): - # create all kinds of subscriptions - results = self._create_subscriptions() - # get the first one - 
subscription_id = results[0][1]["subscription_id"] - resp, body = self.show_subscription(self.queue_name, subscription_id) - self.assertEqual('200', resp['status']) - self.assertEqual('http://fake:8080', body['subscriber']) - # delete them - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.idempotent_id('90489fa2-893d-4062-b2bd-29bdd06f54f3') - def test_update_subscriptions(self): - # create all kinds of subscriptions - results = self._create_subscriptions() - # update the first one - subscription_id = results[0][1]["subscription_id"] - rbody = {'options': {'test': 'updated'}} - self.update_subscription(self.queue_name, subscription_id, rbody) - # get the new one - resp, body = self.show_subscription(self.queue_name, subscription_id) - self.assertEqual('200', resp['status']) - self.assertEqual(rbody['options'], body['options']) - # delete them - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.idempotent_id('fe0d8ec1-1a64-4490-8869-e821b2252e74') - def test_create_subscriptions_with_duplicate_subscriber(self): - # Adding subscriptions to the queue - results = self._create_subscriptions() - s_id1 = results[0][1]['subscription_id'] - - # Adding a subscription with duplicate subscriber, it will reconfirm - # the subscription and run well. - rbody = {'subscriber': 'http://fake:8080', - 'options': {'MessagingKeyMsg': 'MessagingValueMsg'}, - 'ttl': 293305} - resp, body = self.create_subscription(self.queue_name, rbody) - s_id2 = body['subscription_id'] - - self.assertEqual('201', resp['status']) - self.assertEqual(s_id2, s_id1) - - # Delete the subscriptions created - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.idempotent_id('ff4344b4-ba78-44c5-9ffc-44e53e484f76') - def test_trust_subscription(self): - sub_queue = data_utils.rand_name('Queues-Test') - self.addCleanup(self.client.delete_queue, sub_queue) - subscriber = 'trust+{0}/{1}/queues/{2}/messages'.format( - self.client.base_url, self.client.uri_prefix, sub_queue) - post_body = json.dumps( - {'messages': [{'body': '$zaqar_message$', 'ttl': 60}]}) - post_headers = {'X-Project-ID': self.client.tenant_id, - 'Client-ID': uuidutils.generate_uuid()} - sub_body = {'ttl': 1200, 'subscriber': subscriber, - 'options': {'post_data': post_body, - 'post_headers': post_headers}} - - self.create_subscription(queue_name=self.queue_name, rbody=sub_body) - message_body = self.generate_message_body() - self.post_messages(queue_name=self.queue_name, rbody=message_body) - - if not test_utils.call_until_true( - lambda: self.list_messages(sub_queue)[1]['messages'], 10, 1): - self.fail("Couldn't get messages") - _, body = self.list_messages(sub_queue) - expected = message_body['messages'][0] - expected['queue_name'] = self.queue_name - expected['Message_Type'] = 'Notification' - for message in body['messages']: - # There are two message in the queue. One is the confirm message, - # the other one is the notification. 
- if message['body']['Message_Type'] == 'Notification': - self.assertEqual(expected, message['body']) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestSubscriptions, cls).resource_cleanup() diff --git a/zaqar/tests/tempest_plugin/tests/v2/test_subscriptions_negative.py b/zaqar/tests/tempest_plugin/tests/v2/test_subscriptions_negative.py deleted file mode 100644 index a836752c..00000000 --- a/zaqar/tests/tempest_plugin/tests/v2/test_subscriptions_negative.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (c) 2016 LARSEN & TOUBRO LIMITED. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import uuidutils - -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from zaqar.tests.tempest_plugin.tests import base - -CONF = config.CONF - - -class TestSubscriptionsNegative(base.BaseV2MessagingTest): - - @classmethod - def resource_setup(cls): - super(TestSubscriptionsNegative, cls).resource_setup() - cls.queue_name = data_utils.rand_name('Queues-Test') - # Create Queue - cls.client.create_queue(cls.queue_name) - - def _create_subscriptions(self): - bodys = self.generate_subscription_body() - results = [] - for body in bodys: - resp, body = self.create_subscription(queue_name=self.queue_name, - rbody=body) - results.append((resp, body)) - return results - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('0bda2907-a783-4614-af16-23d7a7d53b72') - def test_create_subscriptions_with_invalid_body(self): - # Missing subscriber parameter in body - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - option_body = {key: value} - rbody = {'options': option_body, 'ttl': message_ttl} - self.assertRaises(lib_exc.BadRequest, - self.create_subscription, self.queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('36601d23-77d5-42b1-b234-6789acdda7ba') - def test_create_subscriptions_with_no_body(self): - # Missing parameters in body - rbody = {} - self.assertRaises(lib_exc.BadRequest, - self.create_subscription, self.queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('1d510d93-635f-4161-b071-91f838d6907e') - def test_create_subscriptions_with_invalid_subscriber(self): - # The subscriber type of subscription must be supported in the list - # ['http', 'https', 'mailto'] - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - option_body = {key: value} - subscriber = 'fake' - rbody = {'options': option_body, 'ttl': message_ttl, - 'subscriber': subscriber} - 
self.assertRaises(lib_exc.BadRequest, - self.create_subscription, self.queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('65be33a4-a063-47e1-b56b-9d7aa979bbcb') - def test_create_subscriptions_with_unsupported_subscriber(self): - # The subscriber type of subscription must be supported in the list - # ['http', 'https', 'mailto'] - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - option_body = {key: value} - subscriber = 'email://fake' - rbody = {'options': option_body, 'ttl': message_ttl, - 'subscriber': subscriber} - self.assertRaises(lib_exc.BadRequest, - self.create_subscription, self.queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('cada6c25-0f59-4021-a4c3-961945913998') - def test_create_subscriptions_with_invalid_options(self): - # Options must be a dict - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - option_body = '123' - subscriber = 'http://fake:8080' - rbody = {'options': option_body, 'ttl': message_ttl, - 'subscriber': subscriber} - self.assertRaises(lib_exc.BadRequest, - self.create_subscription, self.queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('84c1e298-c632-4ccb-859f-afe9a390081c') - def test_create_subscriptions_with_non_integer_value_for_ttl(self): - # The subscriber type of subscription must be supported in the list - # ['http', 'https', 'mailto'] - message_ttl = "123" - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - option_body = {key: value} - subscriber = 'http://fake:8080' - rbody = {'options': option_body, 'ttl': message_ttl, - 'subscriber': subscriber} - self.assertRaises(lib_exc.BadRequest, - self.create_subscription, self.queue_name, rbody) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('1302e137-4db6-48ad-b779-ef2095198bc2') - def test_create_a_subscription_without_a_token(self): - # X-Auth-Token is not provided - message_ttl = data_utils.\ - rand_int_id(start=60, end=CONF.messaging.max_message_ttl) - key = data_utils.arbitrary_string(size=20, base_text='MessagingKey') - value = data_utils.arbitrary_string(size=20, - base_text='MessagingValue') - option_body = {key: value} - subscriber = 'http://fake:8080' - rbody = {'options': option_body, 'ttl': message_ttl, - 'subscriber': subscriber} - - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.create_subscription, self.queue_name, rbody) - - # List Subscriptions - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('e2109835-34ad-4f0a-8bbb-43d475d1315d') - def test_list_subscriptions_from_non_existing_queue(self): - # Request for listing subscriptions from a non existent queue - non_existent_queue = data_utils.rand_name('rand_queuename') - resp, _ = self.client.list_subscription(non_existent_queue) - self.assertEqual('200', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('95d7c77f-4912-49ce-9f38-cfcc6d5cd65b') - def test_list_subscriptions_from_queue_with_no_subsciptions(self): - # Request to list subscription - resp, _ = self.client.list_subscription(self.queue_name) - self.assertEqual('200', resp['status']) - - 
@decorators.attr(type=['negative']) - @decorators.idempotent_id('72f8c0b7-23d8-40ef-ae7c-212cc0751946') - def test_list_subscription_without_a_token(self): - # X-Auth-Token is not provided - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.list_subscription, self.queue_name) - - # Show Subscriptions - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('7ecc2cb9-a0f4-4d03-b903-ecf2917fda13') - def test_show_subscriptions_from_non_existing_queue(self): - # Show subscription details from a non existent queue - non_existent_queue = data_utils.rand_name('rand_queuename') - invalid_id = '123' - self.assertRaises(lib_exc.NotFound, - self.show_subscription, non_existent_queue, - invalid_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('bb46d838-e9f9-4851-a788-c30bff41c484') - def test_show_subscriptions_with_invalid_id(self): - # Show subscription details with invaild id - invalid_id = '123' - self.assertRaises(lib_exc.NotFound, - self.show_subscription, self.queue_name, invalid_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('1120f006-397a-4e8b-9e79-e2dc96b37d46') - def test_show_subscriptions_after_deleting_subscription(self): - # Create subscription - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - # Delete subscription - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - # Show the details of the subscription - self.assertRaises(lib_exc.NotFound, - self.show_subscription, self.queue_name, - subscription_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('47a3f29f-6ddb-4cf2-87ed-a2b97733f386') - def test_show_subscription_without_a_token(self): - # X-Auth-Token is not provided - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.show_subscription, self.queue_name, - subscription_id) - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - # Update Subscriptions - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('5c93b468-cb84-424f-af35-d4f5febc7c56') - def test_update_subscription_on_non_existing_queue(self): - # Update subscription on a non existing queue - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - non_existent_queue = data_utils.rand_name('rand_queuename') - update_rbody = {'ttl': 1000} - self.assertRaises(lib_exc.NotFound, self.client.update_subscription, - non_existent_queue, subscription_id, update_rbody) - - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('b383a29a-08f1-418f-8adb-c29ef080358c') - def test_update_subscription_with_invalid_id(self): - # Update subscription using invalid id - results = self._create_subscriptions() - subscription_id = uuidutils.generate_uuid() - update_rbody = {'ttl': 100} - self.assertRaises(lib_exc.NotFound, - self.client.update_subscription, self.queue_name, - subscription_id, update_rbody) - for result in results: - subscription_id = result[1]["subscription_id"] - 
self.delete_subscription(self.queue_name, subscription_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('4e446118-fa90-4f67-9a91-e157fbaa5a4c') - def test_update_subscription_with_empty_body(self): - # Update subscription with no body - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - update_rbody = {' '} - self.assertRaises(lib_exc.BadRequest, - self.client.update_subscription, self.queue_name, - subscription_id, update_rbody) - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('966f5356-9d0b-46c6-9d57-26bcd9d8e699') - def test_update_subscription_with_invalid_TTL(self): - # Update subscription using invalid TTL - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - update_rbody = {'ttl': 50} - self.assertRaises(lib_exc.BadRequest, - self.client.update_subscription, self.queue_name, - subscription_id, update_rbody) - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('8838f3b2-d4c3-42e2-840c-4314e334a2f0') - def test_update_subscription_with_invalid_json_in_request_body(self): - # Update subscription with invalid json - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - update_rbody = {"123"} - self.assertRaises(lib_exc.BadRequest, - self.client.update_subscription, self.queue_name, - subscription_id, update_rbody) - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('8bfe5638-0126-483e-b88a-2767fa6564e6') - def test_update_subscription_with_invalid_token(self): - # X-Auth-Token is not provided - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - update_rbody = {"ttl": "1000"} - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.update_subscription, self.queue_name, - subscription_id, update_rbody) - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - # Delete Subscriptions - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('bb885255-ccac-47e1-a491-2630f205df58') - def test_delete_subscription_from_a_non_existing_queue(self): - # Delete subscription from a non existing queue - rbody = {'subscriber': 'http://fake123:8080', - 'options': {'MessagingKey': 'MessagingValue'}, - 'ttl': 2935} - results = self.create_subscription(self.queue_name, rbody) - subscription_id = results[1]["subscription_id"] - non_existent_queue = data_utils.rand_name('rand_queuename') - resp, _ = self.client.delete_subscription(non_existent_queue, - subscription_id) - self.assertEqual('204', resp['status']) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('a7007b4b-1ab1-4121-9d59-afe5eb82d31c') - def test_delete_subscription_using_a_nonexisting_id(self): - # Delete subscription with non existent id - results = self._create_subscriptions() - subscription_id = uuidutils.generate_uuid() - resp, _ = self.client.delete_subscription(self.queue_name, - subscription_id) - 
self.assertEqual('204', resp['status']) - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('8faf37ee-4abe-4586-9e4b-ed896129a3e8') - def test_delete_subscription_with_invalid_token(self): - # X-Auth-Token is not provided - results = self._create_subscriptions() - subscription_id = results[0][1]["subscription_id"] - self.client.auth_provider.set_alt_auth_data( - request_part='headers', - auth_data=None - ) - self.assertRaises(lib_exc.Unauthorized, - self.client.delete_subscription, self.queue_name, - subscription_id) - for result in results: - subscription_id = result[1]["subscription_id"] - self.delete_subscription(self.queue_name, subscription_id) - - @classmethod - def resource_cleanup(cls): - cls.delete_queue(cls.queue_name) - super(TestSubscriptionsNegative, cls).resource_cleanup() diff --git a/zaqar/tests/unit/__init__.py b/zaqar/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/common/__init__.py b/zaqar/tests/unit/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/common/storage/__init__.py b/zaqar/tests/unit/common/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/common/storage/test_select.py b/zaqar/tests/unit/common/storage/test_select.py deleted file mode 100644 index 11f2a324..00000000 --- a/zaqar/tests/unit/common/storage/test_select.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import testtools - -from zaqar.common.storage import select - - -class TestSelect(testtools.TestCase): - - def test_weighted_returns_none_if_no_objs(self): - self.assertIsNone(select.weighted([])) - - def test_weighted_returns_none_if_objs_have_zero_weight(self): - objs = [{'weight': 0, 'name': str(i)} for i in range(2)] - self.assertIsNone(select.weighted(objs)) - - def test_weighted_ignores_zero_weight_objs(self): - objs = [{'weight': 0, 'name': str(i)} for i in range(2)] - expect = {'weight': 1, 'name': 'theone'} - objs.append(expect) - self.assertEqual(expect, select.weighted(objs)) - - def test_weighted_returns_an_object_it_was_given(self): - objs = [{'weight': 10, 'name': str(i)} for i in range(10)] - ret = select.weighted(objs) - self.assertIn(ret, objs) - - def test_weighted_returns_none_if_selector_oob(self): - objs = [{'weight': 10, 'name': str(i)} for i in range(10)] - sum_weights = sum([o['weight'] for o in objs]) - capped_gen = lambda x, y: sum_weights - self.assertIsNone(select.weighted(objs, - generator=capped_gen)) - - def test_weighted_returns_first_if_selector_is_zero(self): - objs = [{'weight': 10, 'name': str(i)} for i in range(10)] - zero_gen = lambda x, y: 0 - self.assertEqual(objs[0], - select.weighted(objs, generator=zero_gen)) - - def test_weighted_returns_last_if_selector_is_sum_minus_one(self): - objs = [{'weight': 10, 'name': str(i)} for i in range(10)] - sum_weights = sum([o['weight'] for o in objs]) - capped_gen = lambda x, y: sum_weights - 1 - self.assertEqual(objs[-1], - select.weighted(objs, generator=capped_gen)) - - def test_weighted_boundaries(self): - objs = [{'weight': 1, 'name': str(i)} for i in range(3)] - for i in range(len(objs)): - fixed_gen = lambda x, y: i - self.assertEqual(objs[i], - select.weighted(objs, generator=fixed_gen)) diff --git a/zaqar/tests/unit/common/storage/test_utils.py b/zaqar/tests/unit/common/storage/test_utils.py deleted file mode 100644 index ae6e4f03..00000000 --- a/zaqar/tests/unit/common/storage/test_utils.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2014 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
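The removed test_utils.py below exercises zaqar.storage.utils.can_connect(), which must return a boolean without raising and must reject URIs that lack a scheme. The real implementation loads the matching storage driver and calls its liveness check; the following is only a self-contained approximation of that contract using a raw TCP probe (the DEFAULT_PORTS table is an assumption for the sketch, not zaqar code):

import socket

from six.moves.urllib import parse as urllib_parse

DEFAULT_PORTS = {'mongodb': 27017, 'redis': 6379}  # assumption for the sketch


def can_connect(uri, timeout=0.5):
    """Sketch: True iff `uri` has a scheme and its endpoint accepts TCP."""
    if '://' not in uri:
        return False  # no scheme, so no driver could be resolved

    parsed = urllib_parse.urlparse(uri)
    port = parsed.port or DEFAULT_PORTS.get(parsed.scheme, 0)
    try:
        sock = socket.create_connection((parsed.hostname, port),
                                        timeout=timeout)
        sock.close()
        return True
    except Exception:
        return False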
- -import ddt - -from zaqar.common import configs -from zaqar.storage import utils -from zaqar import tests as testing - - -@ddt.ddt -class TestUtils(testing.TestBase): - - def setUp(self): - super(TestUtils, self).setUp() - self.conf.register_opts(configs._GENERAL_OPTIONS) - - @testing.requires_mongodb - def test_can_connect_succeeds_if_good_uri_mongo(self): - self.config(unreliable=True) - self.assertTrue(utils.can_connect(self.mongodb_url, - conf=self.conf)) - - @testing.requires_redis - def test_can_connect_succeeds_if_good_uri_redis(self): - self.assertTrue(utils.can_connect('redis://localhost', - conf=self.conf)) - self.assertTrue(utils.can_connect('redis://localhost:6379', - conf=self.conf)) - - def test_can_connect_fails_if_bad_uri_missing_schema(self): - self.assertFalse(utils.can_connect('localhost:27017', - conf=self.conf)) - - @testing.requires_mongodb - def test_can_connect_fails_if_bad_uri_mongodb(self): - self.config(unreliable=True) - - uri = 'mongodb://localhost:8080?connectTimeoutMS=100' - self.assertFalse(utils.can_connect(uri, conf=self.conf)) - - uri = 'mongodb://example.com:27017?connectTimeoutMS=100' - self.assertFalse(utils.can_connect(uri, conf=self.conf)) - - @testing.requires_redis - def test_can_connect_fails_if_bad_uri_redis(self): - self.assertFalse(utils.can_connect('redis://localhost:8080', - conf=self.conf)) - self.assertFalse(utils.can_connect('redis://example.com:6379', - conf=self.conf)) diff --git a/zaqar/tests/unit/common/test_api.py b/zaqar/tests/unit/common/test_api.py deleted file mode 100644 index 72efbfab..00000000 --- a/zaqar/tests/unit/common/test_api.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from zaqar.common.api import api -from zaqar.common import errors -from zaqar.tests import base - - -class FakeApi(api.Api): - schema = { - 'test_operation': { - 'ref': 'test/{name}', - 'method': 'GET', - 'properties': { - 'name': {'type': 'string'}, - 'address': {'type': 'string'} - }, - - 'additionalProperties': False, - 'required': ['name'] - } - } - - -class TestApi(base.TestBase): - - def setUp(self): - super(TestApi, self).setUp() - self.api = FakeApi() - - def test_valid_params(self): - self.assertTrue(self.api.validate('test_operation', - {'name': 'Sauron'})) - - def test_invalid_params(self): - self.assertFalse(self.api.validate('test_operation', - {'name': 'Sauron', - 'lastname': 'From Mordor'})) - - def test_missing_params(self): - self.assertFalse(self.api.validate('test_operation', {})) - - def test_invalid_operation(self): - self.assertRaises(errors.InvalidAction, self.api.validate, - 'super_secret_op', {}) diff --git a/zaqar/tests/unit/common/test_decorators.py b/zaqar/tests/unit/common/test_decorators.py deleted file mode 100644 index 90b2f0cf..00000000 --- a/zaqar/tests/unit/common/test_decorators.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import msgpack -from oslo_cache import core -from oslo_config import cfg - -from zaqar.common import cache as oslo_cache -from zaqar.common import configs -from zaqar.common import decorators -from zaqar.tests import base - - -class TestDecorators(base.TestBase): - - def setUp(self): - super(TestDecorators, self).setUp() - self.conf.register_opts(configs._GENERAL_OPTIONS) - - def test_memoized_getattr(self): - - class TestClass(object): - - @decorators.memoized_getattr - def __getattr__(self, name): - return name - - instance = TestClass() - result = instance.testing - self.assertEqual('testing', result) - self.assertIn('testing', instance.__dict__) - - def test_cached(self): - conf = cfg.ConfigOpts() - oslo_cache.register_config(conf) - conf.cache.backend = 'dogpile.cache.memory' - conf.cache.enabled = True - cache = oslo_cache.get_cache(conf) - - sample_project = { - u'name': u'Cats Abound', - u'bits': b'\x80\x81\x82\x83\x84', - b'key': u'Value. \x80', - } - - def create_key(user, project=None): - return user + ':' + str(project) - - class TestClass(object): - - def __init__(self, cache): - self._cache = cache - self.project_gets = 0 - self.project_dels = 0 - - @decorators.caches(create_key, 60) - def get_project(self, user, project=None): - self.project_gets += 1 - return sample_project - - @get_project.purges - def del_project(self, user, project=None): - self.project_dels += 1 - - instance = TestClass(cache) - - args = ('23', 'cats') - - project = instance.get_project(*args) - self.assertEqual(sample_project, project) - self.assertEqual(1, instance.project_gets) - - # Should be in the cache now. - project = msgpack.unpackb(cache.get(create_key(*args)), - encoding='utf-8') - self.assertEqual(sample_project, project) - - # Should read from the cache this time (counter will not - # be incremented). - project = instance.get_project(*args) - self.assertEqual(sample_project, project) - self.assertEqual(1, instance.project_gets) - - # Use kwargs this time - instance.del_project('23', project='cats') - self.assertEqual(1, instance.project_dels) - - # Should be a cache miss since we purged (above) - project = instance.get_project(*args) - self.assertEqual(2, instance.project_gets) - - def test_cached_with_cond(self): - conf = cfg.ConfigOpts() - oslo_cache.register_config(conf) - conf.cache.backend = 'dogpile.cache.memory' - conf.cache.enabled = True - cache = oslo_cache.get_cache(conf) - - class TestClass(object): - - def __init__(self, cache): - self._cache = cache - self.user_gets = 0 - - @decorators.caches(lambda x: x, 60, lambda v: v != 'kgriffs') - def get_user(self, name): - self.user_gets += 1 - return name - - instance = TestClass(cache) - - name = 'malini' - - user = instance.get_user(name) - self.assertEqual(name, user) - self.assertEqual(1, instance.user_gets) - - # Should be in the cache now. 
- user = msgpack.unpackb(cache.get(name), encoding='utf-8') - self.assertEqual(name, user) - - # Should read from the cache this time (counter will not - # be incremented). - user = instance.get_user(name) - self.assertEqual(name, user) - self.assertEqual(1, instance.user_gets) - - # Won't go into the cache because of cond - name = 'kgriffs' - for i in range(3): - user = instance.get_user(name) - - self.assertEqual(cache.get(name), core.NO_VALUE) - - self.assertEqual(name, user) - self.assertEqual(2 + i, instance.user_gets) - - def test_api_version_manager(self): - self.config(enable_deprecated_api_versions=[]) - # 1. Test accessing current API version - VERSION = { - 'id': '1', - 'status': 'CURRENT', - 'updated': 'Just yesterday' - } - - @decorators.api_version_manager(VERSION) - def public_endpoint_1(driver, conf): - return True - - self.assertTrue(public_endpoint_1(None, self.conf)) - - # 2. Test accessing deprecated API version - VERSION = { - 'id': '1', - 'status': 'DEPRECATED', - 'updated': 'A long time ago' - } - - @decorators.api_version_manager(VERSION) - def public_endpoint_2(driver, conf): - self.fail('Deprecated API enabled') - - public_endpoint_2(None, self.conf) - - # 3. Test enabling deprecated API version - self.config(enable_deprecated_api_versions=[['1']]) - - @decorators.api_version_manager(VERSION) - def public_endpoint_3(driver, conf): - return True - - self.assertTrue(public_endpoint_3(None, self.conf)) diff --git a/zaqar/tests/unit/common/test_pipeline.py b/zaqar/tests/unit/common/test_pipeline.py deleted file mode 100644 index 8f47fdb4..00000000 --- a/zaqar/tests/unit/common/test_pipeline.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from zaqar.common import pipeline -from zaqar.tests import base - - -class FirstClass(object): - - def with_args(self, name): - return name - - def with_kwargs(self, lastname='yo'): - return lastname - - def with_args_kwargs(self, name, lastname='yo'): - return '{0} {1}'.format(name, lastname) - - def no_args(self): - return True - - def does_nothing(self): - return None - - def calls_the_latest(self): - return None - - -class SecondClass(object): - - def does_nothing(self): - return None - - def calls_the_latest(self): - return True - - def _raise_rterror(self): - raise RuntimeError("It shouldn't get here!") - - # NOTE(flaper87): These methods will be used to test - # that the pipeline stops at the first class returning - # something.
- with_args = with_kwargs = no_args = _raise_rterror - - -class TestPipeLine(base.TestBase): - - def setUp(self): - super(TestPipeLine, self).setUp() - self.pipeline = pipeline.Pipeline([FirstClass(), - SecondClass()]) - - def test_attribute_error(self): - consumer = self.pipeline.does_not_exist - self.assertRaises(AttributeError, consumer) - - def test_with_args(self): - name = 'James' - self.assertEqual(name, self.pipeline.with_args(name)) - - def test_with_kwargs(self): - lastname = 'Bond' - self.assertEqual(lastname, self.pipeline.with_kwargs(lastname)) - self.assertEqual(lastname, - self.pipeline.with_kwargs(lastname=lastname)) - - def test_with_args_kwargs(self): - fullname = 'James Bond' - name, lastname = fullname.split() - result = self.pipeline.with_args_kwargs(name, lastname=lastname) - self.assertEqual(fullname, result) - - def test_does_nothing(self): - self.assertIsNone(self.pipeline.does_nothing()) - - def test_calls_the_latest(self): - self.assertTrue(self.pipeline.calls_the_latest()) - - def test_pipeline_context_manager(self): - ctxt = self.pipeline.consumer_for('does_nothing') - - with ctxt as consumer: - self.assertIsNone(consumer()) diff --git a/zaqar/tests/unit/common/test_request.py b/zaqar/tests/unit/common/test_request.py deleted file mode 100644 index 73270432..00000000 --- a/zaqar/tests/unit/common/test_request.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from zaqar.common.api import request -from zaqar.common import consts -from zaqar.tests import base - - -class TestRequest(base.TestBase): - - def test_request(self): - action = consts.MESSAGE_POST - data = 'body' - env = {'foo': 'bar'} - req = request.Request(action=action, body=data, env=env) - self.assertEqual({'foo': 'bar'}, req._env) - self.assertEqual('body', req._body) - self.assertEqual(consts.MESSAGE_POST, req._action) diff --git a/zaqar/tests/unit/common/test_urls.py b/zaqar/tests/unit/common/test_urls.py deleted file mode 100644 index 8ce3a1af..00000000 --- a/zaqar/tests/unit/common/test_urls.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
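The removed test_pipeline.py above fixes the observable behavior of zaqar.common.pipeline.Pipeline: looking up a method name returns a consumer that invokes that method on each stage in order, stops at the first non-None result, and raises AttributeError when no stage implements it. A minimal sketch that satisfies those tests follows; it is not the zaqar implementation, and the trivial context-manager shim is only there to mirror test_pipeline_context_manager:

import contextlib


class Pipeline(object):

    def __init__(self, pipeline=None):
        self._pipeline = list(pipeline) if pipeline else []

    def __getattr__(self, name):
        # Bind the matching method from every stage that has one.
        stages = [getattr(stage, name)
                  for stage in self._pipeline if hasattr(stage, name)]

        def consumer(*args, **kwargs):
            if not stages:
                raise AttributeError(name)

            result = None
            for stage in stages:
                result = stage(*args, **kwargs)
                if result is not None:
                    break  # first stage producing a value short-circuits
            return result

        return consumer

    @contextlib.contextmanager
    def consumer_for(self, method):
        # Hands the consumer out through a context manager, as the
        # removed tests expect.
        yield getattr(self, method)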
- -import datetime -import hashlib -import hmac - -from oslo_utils import timeutils -import six - -from zaqar.common import urls -from zaqar.tests import base - - -class TestURLs(base.TestBase): - - def test_create_signed_url(self): - timeutils.set_time_override() - self.addCleanup(timeutils.clear_time_override) - - key = six.b('test') - methods = ['POST'] - project = 'my-project' - paths = ['/v2/queues/shared/messages'] - expires = timeutils.utcnow() + datetime.timedelta(days=1) - expires_str = expires.strftime(urls._DATE_FORMAT) - - hmac_body = six.b(r'%(paths)s\n%(methods)s\n' - r'%(project)s\n%(expires)s' % - {'paths': ','.join(paths), - 'methods': ','.join(methods), - 'project': project, 'expires': expires_str}) - - expected = hmac.new(key, hmac_body, hashlib.sha256).hexdigest() - actual = urls.create_signed_url(key, paths, methods=['POST'], - project=project) - self.assertEqual(expected, actual['signature']) - - def test_create_signed_url_multiple_paths(self): - timeutils.set_time_override() - self.addCleanup(timeutils.clear_time_override) - - key = six.b('test') - methods = ['POST'] - project = 'my-project' - paths = ['/v2/queues/shared/messages', - '/v2/queues/shared/subscriptions'] - expires = timeutils.utcnow() + datetime.timedelta(days=1) - expires_str = expires.strftime(urls._DATE_FORMAT) - - hmac_body = six.b(r'%(paths)s\n%(methods)s\n' - r'%(project)s\n%(expires)s' % - {'paths': ','.join(paths), - 'methods': ','.join(methods), - 'project': project, 'expires': expires_str}) - - expected = hmac.new(key, hmac_body, hashlib.sha256).hexdigest() - actual = urls.create_signed_url(key, paths, methods=['POST'], - project=project) - self.assertEqual(expected, actual['signature']) - - def test_create_signed_url_utc(self): - """Test that the method converts the TZ to UTC.""" - date_str = '2100-05-31T19:00:17+02' - date_str_utc = '2100-05-31T17:00:17' - - key = six.b('test') - project = None - methods = ['GET'] - paths = ['/v2/queues/shared/messages'] - parsed = timeutils.parse_isotime(date_str_utc) - expires = timeutils.normalize_time(parsed) - expires_str = expires.strftime(urls._DATE_FORMAT) - - hmac_body = six.b('%(paths)s\\n%(methods)s\\n' - '%(project)s\\n%(expires)s' % - {'paths': ','.join(paths), - 'methods': ','.join(methods), - 'project': project, 'expires': expires_str}) - - expected = hmac.new(key, hmac_body, hashlib.sha256).hexdigest() - actual = urls.create_signed_url(key, paths, expires=date_str) - self.assertEqual(expected, actual['signature']) - - def test_create_signed_urls_validation(self): - self.assertRaises(ValueError, urls.create_signed_url, None, ['/test']) - self.assertRaises(ValueError, urls.create_signed_url, 'test', None) - self.assertRaises(ValueError, urls.create_signed_url, 'test', - ['/test'], methods='not list') - self.assertRaises(ValueError, urls.create_signed_url, 'test', []) - self.assertRaises(ValueError, urls.create_signed_url, 'test', '/test') - self.assertRaises(ValueError, urls.create_signed_url, 'test', - ['/test'], expires='wrong date format') - self.assertRaises(ValueError, urls.create_signed_url, 'test', - ['/test'], expires='3600') diff --git a/zaqar/tests/unit/hacking/__init__.py b/zaqar/tests/unit/hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/hacking/test_hacking.py b/zaqar/tests/unit/hacking/test_hacking.py deleted file mode 100644 index 8a5df9ff..00000000 --- a/zaqar/tests/unit/hacking/test_hacking.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from zaqar.hacking import checks -from zaqar.tests import base - - -class HackingTestCase(base.TestBase): - def test_no_log_translations(self): - for log in checks._all_log_levels: - for hint in checks._all_hints: - bad = 'LOG.%s(%s("Bad"))' % (log, hint) - self.assertEqual(1, len(list(checks.no_translate_logs(bad)))) - # Catch abuses when used with a variable and not a literal - bad = 'LOG.%s(%s(msg))' % (log, hint) - self.assertEqual(1, len(list(checks.no_translate_logs(bad)))) diff --git a/zaqar/tests/unit/notification/__init__.py b/zaqar/tests/unit/notification/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/notification/test_notifier.py b/zaqar/tests/unit/notification/test_notifier.py deleted file mode 100644 index 608c16f3..00000000 --- a/zaqar/tests/unit/notification/test_notifier.py +++ /dev/null @@ -1,413 +0,0 @@ -# Copyright (c) 2014 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
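test_notifier.py, removed below, covers the notification fan-out: webhook POSTs, post_data templating, mailto delivery via a subprocess, and the confirmation flow. As a primer, here is a stripped-down sketch of just the webhook path that the first tests pin down; the real NotifierDriver paginates subscriptions from a controller and dispatches on an executor, both omitted here:

import json

import requests


def notify_webhooks(subscriptions, messages, queue_name):
    """Sketch: annotate each message and POST it to every subscriber."""
    headers = {'Content-Type': 'application/json'}
    for message in messages:
        # Mirror what the tests call "notifications": the stored message
        # plus queue_name and Message_Type keys.
        notification = dict(message,
                            queue_name=queue_name,
                            Message_Type='Notification')
        for subscription in subscriptions:
            requests.post(subscription['subscriber'],
                          data=json.dumps(notification),
                          headers=headers)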
- -import json -import uuid - -import ddt -import mock - -from zaqar.common import urls -from zaqar.notification import notifier -from zaqar import tests as testing - - -@ddt.ddt -class NotifierTest(testing.TestBase): - - def setUp(self): - super(NotifierTest, self).setUp() - self.client_id = uuid.uuid4() - self.project = uuid.uuid4() - self.messages = [{"ttl": 300, - "body": {"event": "BackupStarted", - "backup_id": "c378813c-3f0b-11e2-ad92"} - }, - {"body": {"event": "BackupProgress", - "current_bytes": "0", - "total_bytes": "99614720"} - } - ] - # NOTE(Eva-i): NotifierDriver adds "queue_name" key to each - # message (dictionary), so final notifications look like this - self.notifications = [{"ttl": 300, - "body": {"event": "BackupStarted", - "backup_id": - "c378813c-3f0b-11e2-ad92"}, - "queue_name": "fake_queue", - "Message_Type": "Notification" - }, - {"body": {"event": "BackupProgress", - "current_bytes": "0", - "total_bytes": "99614720"}, - "queue_name": "fake_queue", - "Message_Type": "Notification" - } - ] - self.api_version = 'v2' - - def test_webhook(self): - subscription = [{'subscriber': 'http://trigger_me', - 'source': 'fake_queue', - 'options': {}}, - {'subscriber': 'http://call_me', - 'source': 'fake_queue', - 'options': {}}, - {'subscriber': 'http://ping_me', - 'source': 'fake_queue', - 'options': {}}] - ctlr = mock.MagicMock() - ctlr.list = mock.Mock(return_value=iter([subscription, {}])) - driver = notifier.NotifierDriver(subscription_controller=ctlr) - headers = {'Content-Type': 'application/json'} - with mock.patch('requests.post') as mock_post: - driver.post('fake_queue', self.messages, self.client_id, - self.project) - driver.executor.shutdown() - # Let's deserialize "data" from JSON string to dict in each mock - # call, so we can do dict comparisons. JSON string comparisons - # often fail, because dict keys can be serialized in different - # order inside the string. - for call in mock_post.call_args_list: - call[1]['data'] = json.loads(call[1]['data']) - # These are not real calls. In real calls each "data" argument is - # serialized by json.dumps. But we made a substitution before, - # so it will work. - mock_post.assert_has_calls([ - mock.call(subscription[0]['subscriber'], - data=self.notifications[0], - headers=headers), - mock.call(subscription[1]['subscriber'], - data=self.notifications[0], - headers=headers), - mock.call(subscription[2]['subscriber'], - data=self.notifications[0], - headers=headers), - mock.call(subscription[0]['subscriber'], - data=self.notifications[1], - headers=headers), - mock.call(subscription[1]['subscriber'], - data=self.notifications[1], - headers=headers), - mock.call(subscription[2]['subscriber'], - data=self.notifications[1], - headers=headers), - ], any_order=True) - self.assertEqual(6, len(mock_post.mock_calls)) - - def test_webhook_post_data(self): - post_data = {'foo': 'bar', 'egg': '$zaqar_message$'} - subscription = [{'subscriber': 'http://trigger_me', - 'source': 'fake_queue', - 'options': {'post_data': json.dumps(post_data)}}] - ctlr = mock.MagicMock() - ctlr.list = mock.Mock(return_value=iter([subscription, {}])) - driver = notifier.NotifierDriver(subscription_controller=ctlr) - headers = {'Content-Type': 'application/json'} - with mock.patch('requests.post') as mock_post: - driver.post('fake_queue', self.messages, self.client_id, - self.project) - driver.executor.shutdown() - # Let's deserialize "data" from JSON string to dict in each mock - # call, so we can do dict comparisons.
JSON string comparisons - # often fail, because dict keys can be serialized in different - # order inside the string. - for call in mock_post.call_args_list: - call[1]['data'] = json.loads(call[1]['data']) - # These are not real calls. In real calls each "data" argument is - # serialized by json.dumps. But we made a substitution before, - # so it will work. - mock_post.assert_has_calls([ - mock.call(subscription[0]['subscriber'], - data={'foo': 'bar', 'egg': self.notifications[0]}, - headers=headers), - mock.call(subscription[0]['subscriber'], - data={'foo': 'bar', 'egg': self.notifications[1]}, - headers=headers), - ], any_order=True) - self.assertEqual(2, len(mock_post.mock_calls)) - - def test_marker(self): - subscription1 = [{'subscriber': 'http://trigger_me1', - 'source': 'fake_queue', - 'options': {}}] - subscription2 = [{'subscriber': 'http://trigger_me2', - 'source': 'fake_queue', - 'options': {}}] - ctlr = mock.MagicMock() - - def mock_list(queue, project, marker): - if not marker: - return iter([subscription1, 'marker_id']) - else: - return iter([subscription2, {}]) - - ctlr.list = mock_list - driver = notifier.NotifierDriver(subscription_controller=ctlr) - headers = {'Content-Type': 'application/json'} - with mock.patch('requests.post') as mock_post: - driver.post('fake_queue', self.messages, self.client_id, - self.project) - driver.executor.shutdown() - # Let's deserialize "data" from JSON string to dict in each mock - # call, so we can do dict comparisons. JSON string comparisons - # often fail, because dict keys can be serialized in different - # order inside the string. - for call in mock_post.call_args_list: - call[1]['data'] = json.loads(call[1]['data']) - # These are not real calls. In real calls each "data" argument is - # serialized by json.dumps. But we made a substitution before, - # so it will work. 
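# (Editorial aside, not part of the removed test: equal dicts can
#  serialize to different JSON strings, e.g. json.dumps({'a': 1, 'b': 2})
#  vs. json.dumps({'b': 2, 'a': 1}), which is why the assertions below
#  compare decoded dicts rather than raw strings.)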
- mock_post.assert_has_calls([ - mock.call(subscription1[0]['subscriber'], - data=self.notifications[0], - headers=headers), - mock.call(subscription2[0]['subscriber'], - data=self.notifications[0], - headers=headers), - ], any_order=True) - self.assertEqual(4, len(mock_post.mock_calls)) - - @mock.patch('subprocess.Popen') - def test_mailto(self, mock_popen): - subscription = [{'subscriber': 'mailto:aaa@example.com', - 'source': 'fake_queue', - 'options': {'subject': 'Hello', - 'from': 'zaqar@example.com'}}, - {'subscriber': 'mailto:bbb@example.com', - 'source': 'fake_queue', - 'options': {'subject': 'Hello', - 'from': 'zaqar@example.com'}}] - ctlr = mock.MagicMock() - ctlr.list = mock.Mock(return_value=iter([subscription, {}])) - driver = notifier.NotifierDriver(subscription_controller=ctlr) - called = set() - msg = ('Content-Type: text/plain; charset="us-ascii"\n' - 'MIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nto:' - ' %(to)s\nfrom: %(from)s\nsubject: %(subject)s\n\n%(body)s') - mail1 = msg % {'to': subscription[0]['subscriber'][7:], - 'from': 'zaqar@example.com', 'subject': 'Hello', - 'body': json.dumps(self.notifications[0])} - mail2 = msg % {'to': subscription[0]['subscriber'][7:], - 'from': 'zaqar@example.com', 'subject': 'Hello', - 'body': json.dumps(self.notifications[1])} - mail3 = msg % {'to': subscription[1]['subscriber'][7:], - 'from': 'zaqar@example.com', 'subject': 'Hello', - 'body': json.dumps(self.notifications[0])} - mail4 = msg % {'to': subscription[1]['subscriber'][7:], - 'from': 'zaqar@example.com', 'subject': 'Hello', - 'body': json.dumps(self.notifications[1])} - - def _communicate(msg): - called.add(msg) - - mock_process = mock.Mock() - attrs = {'communicate': _communicate} - mock_process.configure_mock(**attrs) - mock_popen.return_value = mock_process - driver.post('fake_queue', self.messages, self.client_id, self.project) - driver.executor.shutdown() - - self.assertEqual(4, len(called)) - # Let's deserialize "body" from JSON string to dict and then serialize - # it back to JSON, but sorted, allowing us to make comparisons.
- mails = {mail1, mail2, mail3, mail4} - mail_options = [] - mail_bodies = [] - for mail in mails: - options, body = mail.split('\n\n') - mail_options.append(options) - mail_bodies.append(json.dumps(json.loads(body), sort_keys=True)) - called_options = [] - called_bodies = [] - for call in called: - options, body = call.split('\n\n') - called_options.append(options) - called_bodies.append(json.dumps(json.loads(body), sort_keys=True)) - self.assertEqual(sorted(mail_options), sorted(called_options)) - self.assertEqual(sorted(mail_bodies), sorted(called_bodies)) - - def test_post_no_subscriber(self): - ctlr = mock.MagicMock() - ctlr.list = mock.Mock(return_value=iter([[], {}])) - driver = notifier.NotifierDriver(subscription_controller=ctlr) - with mock.patch('requests.post') as mock_post: - driver.post('fake_queue', self.messages, self.client_id, - self.project) - driver.executor.shutdown() - self.assertEqual(0, mock_post.call_count) - - def test_proper_notification_data(self): - subscription = [{'subscriber': 'http://trigger_me', - 'source': 'fake_queue', - 'options': {}}] - ctlr = mock.MagicMock() - ctlr.list = mock.Mock(return_value=iter([subscription, {}])) - driver = notifier.NotifierDriver(subscription_controller=ctlr) - with mock.patch('requests.post') as mock_post: - driver.post('fake_queue', self.messages, self.client_id, - self.project) - driver.executor.shutdown() - self.assertEqual(2, mock_post.call_count) - self.assertEqual(self.notifications[1], - json.loads(mock_post.call_args[1]['data'])) - - @mock.patch('requests.post') - def test_send_confirm_notification(self, mock_request): - self.conf.notification.require_confirmation = True - subscription = {'id': '5760c9fb3990b42e8b7c20bd', - 'subscriber': 'http://trigger_me', - 'source': 'fake_queue', - 'options': {}} - ctlr = mock.MagicMock() - ctlr.list = mock.Mock(return_value=subscription) - driver = notifier.NotifierDriver(subscription_controller=ctlr, - require_confirmation=True) - self.conf.signed_url.secret_key = 'test_key' - driver.send_confirm_notification('test_queue', subscription, self.conf, - str(self.project), - api_version=self.api_version) - driver.executor.shutdown() - - self.assertEqual(1, mock_request.call_count) - expect_args = ['SubscribeBody', 'queue_name', 'URL-Methods', - 'X-Project-ID', 'URL-Signature', 'URL-Paths', 'Message', - 'URL-Expires', 'Message_Type', 'WSGISubscribeURL', - 'WebSocketSubscribeURL', 'UnsubscribeBody'] - actual_args = json.loads(mock_request.call_args[1]['data']).keys() - self.assertEqual(sorted(expect_args), - sorted(actual_args)) - - @mock.patch('requests.post') - def test_send_confirm_notification_without_signed_url(self, mock_request): - subscription = [{'subscriber': 'http://trigger_me', - 'source': 'fake_queue', 'options': {}}] - ctlr = mock.MagicMock() - ctlr.list = mock.Mock(return_value=iter([subscription, {}])) - driver = notifier.NotifierDriver(subscription_controller=ctlr) - - driver.send_confirm_notification('test_queue', subscription, self.conf, - str(self.project), self.api_version) - driver.executor.shutdown() - - self.assertEqual(0, mock_request.call_count) - - @mock.patch.object(urls, 'create_signed_url') - def test_require_confirmation_false(self, mock_create_signed_url): - subscription = [{'subscriber': 'http://trigger_me', - 'source': 'fake_queue', 'options': {}}] - ctlr = mock.MagicMock() - driver = notifier.NotifierDriver(subscription_controller=ctlr, - require_confirmation=False) - - driver.send_confirm_notification('test_queue', subscription, self.conf, - 
str(self.project), self.api_version) - - self.assertFalse(mock_create_signed_url.called) - - def _make_confirm_string(self, conf, message, queue_name): - confirmation_url = conf.notification.external_confirmation_url - param_string_signature = '?Signature=' + message.get('signature') - param_string_methods = '&Methods=' + message.get('methods')[0] - param_string_paths = '&Paths=' + message.get('paths')[0] - param_string_project = '&Project=' + message.get('project') - param_string_expires = '&Expires=' + message.get('expires') - param_string_confirm_url = '&Url=' + message.get('WSGISubscribeURL', - '') - param_string_queue = '&Queue=' + queue_name - confirm_url_string = (confirmation_url + param_string_signature + - param_string_methods + param_string_paths + - param_string_project + param_string_expires + - param_string_confirm_url + param_string_queue) - return confirm_url_string - - @mock.patch('zaqar.common.urls.create_signed_url') - @mock.patch('subprocess.Popen') - def _send_confirm_notification_with_email(self, mock_popen, - mock_signed_url, - is_unsubscribed=False): - subscription = {'id': '5760c9fb3990b42e8b7c20bd', - 'subscriber': 'mailto:aaa@example.com', - 'source': 'test_queue', - 'options': {'subject': 'Hello', - 'from': 'zaqar@example.com'} - } - driver = notifier.NotifierDriver(require_confirmation=True) - self.conf.signed_url.secret_key = 'test_key' - self.conf.notification.external_confirmation_url = 'http://127.0.0.1' - self.conf.notification.require_confirmation = True - - message = {'methods': ['PUT'], - 'paths': ['/v2/queues/test_queue/subscriptions/' - '5760c9fb3990b42e8b7c20bd/confirm'], - 'project': str(self.project), - 'expires': '2016-12-20T02:01:23', - 'signature': 'e268676368c235dbe16e0e9ac40f2829a92c948288df' - '36e1cbabd9de73f698df', - } - confirm_url = self._make_confirm_string(self.conf, message, - 'test_queue') - msg = ('Content-Type: text/plain; charset="us-ascii"\n' - 'MIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nto:' - ' %(to)s\nfrom: %(from)s\nsubject: %(subject)s\n\n%(body)s') - if is_unsubscribed: - e = self.conf.notification.unsubscribe_confirmation_email_template - body = e['body'] - topic = e['topic'] - sender = e['sender'] - else: - e = self.conf.notification.subscription_confirmation_email_template - body = e['body'] - topic = e['topic'] - sender = e['sender'] - body = body.format(subscription['source'], str(self.project), - confirm_url) - mail1 = msg % {'to': subscription['subscriber'][7:], - 'from': sender, - 'subject': topic, - 'body': body} - - called = set() - - def _communicate(msg): - called.add(msg) - - mock_process = mock.Mock() - attrs = {'communicate': _communicate} - mock_process.configure_mock(**attrs) - mock_popen.return_value = mock_process - mock_signed_url.return_value = message - driver.send_confirm_notification('test_queue', subscription, self.conf, - str(self.project), - api_version=self.api_version, - is_unsubscribed=is_unsubscribed) - driver.executor.shutdown() - - self.assertEqual(1, mock_popen.call_count) - options, body = mail1.split('\n\n') - expec_options = [options] - expect_body = [body] - called_options = [] - called_bodies = [] - for call in called: - options, body = call.split('\n\n') - called_options.append(options) - called_bodies.append(body) - self.assertEqual(expec_options, called_options) - self.assertEqual(expect_body, called_bodies) - - @ddt.data(False, True) - def test_send_confirm_notification_with_email(self, is_unsub): - self._send_confirm_notification_with_email(is_unsubscribed=is_unsub) diff 
--git a/zaqar/tests/unit/storage/__init__.py b/zaqar/tests/unit/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/storage/base.py b/zaqar/tests/unit/storage/base.py deleted file mode 100644 index 57622505..00000000 --- a/zaqar/tests/unit/storage/base.py +++ /dev/null @@ -1,1828 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# Copyright (c) 2014 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import datetime -import math -import random -import time -import uuid - -import ddt -import mock -from oslo_utils import timeutils -import six -from testtools import matchers - -from zaqar.common import cache as oslo_cache -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage import pipeline -from zaqar import tests as testing -from zaqar.tests import helpers - - -class ControllerBaseTest(testing.TestBase): - project = 'project' - driver_class = None - controller_class = None - controller_base_class = None - - def setUp(self): - super(ControllerBaseTest, self).setUp() - - if not self.driver_class: - self.skipTest('No driver class specified') - - if not issubclass(self.controller_class, self.controller_base_class): - self.skipTest('{0} is not an instance of {1}. ' - 'Tests not supported'.format( - self.controller_class, - self.controller_base_class)) - - oslo_cache.register_config(self.conf) - cache = oslo_cache.get_cache(self.conf) - - pooling = 'pooling' in self.conf and self.conf.pooling - if pooling and not self.control_driver_class: - self.skipTest("Pooling is enabled, " - "but control driver class is not specified") - - self.control = self.control_driver_class(self.conf, cache) - if not pooling: - args = [self.conf, cache] - if issubclass(self.driver_class, storage.DataDriverBase): - args.append(self.control) - self.driver = self.driver_class(*args) - else: - uri = self.mongodb_url - for i in range(4): - db_name = "zaqar_test_pools_" + str(i) - - # NOTE(dynarro): we need to create a unique uri. - uri = "%s/%s" % (uri, db_name) - options = {'database': db_name} - self.control.pools_controller.create(six.text_type(i), - 100, uri, options=options) - self.driver = self.driver_class(self.conf, cache, self.control) - self.addCleanup(self.control.pools_controller.drop_all) - self.addCleanup(self.control.catalogue_controller.drop_all) - - self._prepare_conf() - - self.addCleanup(self._purge_databases) - - if not pooling: - self.controller = self.controller_class(self.driver) - else: - self.controller = self.controller_class(self.driver._pool_catalog) - - self.pipeline = pipeline.DataDriver(self.conf, - self.driver, - self.control) - - def _prepare_conf(self): - """Prepare the conf before running tests - - Classes overriding this method, must use - the `self.conf` instance and alter its state. 
- """ - - def _purge_databases(self): - """Override to clean databases.""" - - -@ddt.ddt -class QueueControllerTest(ControllerBaseTest): - """Queue Controller base tests.""" - controller_base_class = storage.Queue - - def setUp(self): - super(QueueControllerTest, self).setUp() - self.queue_controller = self.pipeline.queue_controller - - @ddt.data(None, ControllerBaseTest.project) - def test_list(self, project): - # NOTE(kgriffs): Ensure we mix global and scoped queues - # in order to verify that queue records are excluded that - # are not at the same level. - project_alt = self.project if project is None else None - - num = 15 - for queue in six.moves.xrange(num): - queue = str(queue) - self.controller.create(queue, project=project) - self.controller.create(queue, project=project_alt) - self.addCleanup(self.controller.delete, - queue, project=project) - self.addCleanup(self.controller.delete, - queue, project=project_alt) - - interaction = self.controller.list(project=project, - detailed=True) - queues = list(next(interaction)) - - self.assertTrue(all(map(lambda queue: - 'name' in queue and - 'metadata' in queue, queues))) - self.assertEqual(10, len(queues)) - - interaction = self.controller.list(project=project, - marker=next(interaction)) - queues = list(next(interaction)) - - self.assertTrue(all(map(lambda queue: - 'name' in queue and - 'metadata' not in queue, queues))) - - self.assertEqual(5, len(queues)) - - def test_queue_lifecycle(self): - # Test queue creation - created = self.controller.create('test', - metadata=dict(meta='test_meta'), - project=self.project) - self.assertTrue(created) - - # Test queue existence - self.assertTrue(self.controller.exists('test', project=self.project)) - - # Test queue retrieval - interaction = self.controller.list(project=self.project) - queue = list(next(interaction))[0] - self.assertEqual('test', queue['name']) - - # Test queue metadata retrieval - metadata = self.controller.get('test', project=self.project) - self.assertEqual('test_meta', metadata['meta']) - - # Touching an existing queue does not affect metadata - created = self.controller.create('test', project=self.project) - self.assertFalse(created) - - metadata = self.controller.get('test', project=self.project) - self.assertEqual('test_meta', metadata['meta']) - - # Test queue deletion - self.controller.delete('test', project=self.project) - - # Test queue existence - self.assertFalse(self.controller.exists('test', project=self.project)) - - -class MessageControllerTest(ControllerBaseTest): - """Message Controller base tests. - - NOTE(flaper87): Implementations of this class should - override the tearDown method in order - to clean up storage's state. - """ - queue_name = 'test_queue' - controller_base_class = storage.Message - - # Specifies how often expired messages are purged, in sec. 
- gc_interval = 0 - - def setUp(self): - super(MessageControllerTest, self).setUp() - - # Lets create a queue - self.queue_controller = self.pipeline.queue_controller - self.claim_controller = self.pipeline.claim_controller - self.queue_controller.create(self.queue_name, project=self.project) - - def tearDown(self): - self.queue_controller.delete(self.queue_name, project=self.project) - super(MessageControllerTest, self).tearDown() - - def test_stats_for_empty_queue(self): - self.addCleanup(self.queue_controller.delete, 'test', - project=self.project) - created = self.queue_controller.create('test', project=self.project) - self.assertTrue(created) - - stats = self.queue_controller.stats('test', project=self.project) - message_stats = stats['messages'] - - self.assertEqual(0, message_stats['free']) - self.assertEqual(0, message_stats['claimed']) - self.assertEqual(0, message_stats['total']) - - self.assertNotIn('newest', message_stats) - self.assertNotIn('oldest', message_stats) - - def test_queue_count_on_bulk_delete(self): - self.addCleanup(self.queue_controller.delete, 'test-queue', - project=self.project) - queue_name = 'test-queue' - client_uuid = uuid.uuid4() - - created = self.queue_controller.create(queue_name, - project=self.project) - self.assertTrue(created) - - # Create 10 messages. - msg_keys = _insert_fixtures(self.controller, queue_name, - project=self.project, - client_uuid=client_uuid, num=10) - - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(10, stats['total']) - - # Delete 5 messages - self.controller.bulk_delete(queue_name, msg_keys[0:5], - self.project) - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(5, stats['total']) - - def test_queue_count_on_bulk_delete_with_invalid_id(self): - self.addCleanup(self.queue_controller.delete, 'test-queue', - project=self.project) - queue_name = 'test-queue' - client_uuid = uuid.uuid4() - - created = self.queue_controller.create(queue_name, - project=self.project) - self.assertTrue(created) - - # Create 10 messages. - msg_keys = _insert_fixtures(self.controller, queue_name, - project=self.project, - client_uuid=client_uuid, num=10) - - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(10, stats['total']) - - # Delete 5 messages - self.controller.bulk_delete(queue_name, - msg_keys[0:5] + ['invalid'], - self.project) - - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(5, stats['total']) - - def test_queue_count_on_delete(self): - self.addCleanup(self.queue_controller.delete, 'test-queue', - project=self.project) - queue_name = 'test-queue' - client_uuid = uuid.uuid4() - - created = self.queue_controller.create(queue_name, - project=self.project) - self.assertTrue(created) - - # Create 10 messages. 
- msg_keys = _insert_fixtures(self.controller, queue_name, - project=self.project, - client_uuid=client_uuid, num=10) - - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(10, stats['total']) - - # Delete 1 message - self.controller.delete(queue_name, msg_keys[0], self.project) - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(9, stats['total']) - - def test_queue_stats(self): - # Test queue creation - self.addCleanup(self.queue_controller.delete, 'test', - project=self.project) - created = self.queue_controller.create('test', - metadata=dict(meta='test_meta'), - project=self.project) - - client_uuid = uuid.uuid4() - # Test queue statistics - _insert_fixtures(self.controller, 'test', - project=self.project, client_uuid=client_uuid, - num=6) - - # NOTE(kgriffs): We can't get around doing this, because - # we don't know how the storage driver may be calculating - # message timestamps (and may not be monkey-patchable). - time.sleep(1.2) - - _insert_fixtures(self.controller, 'test', - project=self.project, client_uuid=client_uuid, - num=6) - - stats = self.queue_controller.stats('test', project=self.project) - message_stats = stats['messages'] - - self.assertEqual(12, message_stats['free']) - self.assertEqual(0, message_stats['claimed']) - self.assertEqual(12, message_stats['total']) - - oldest = message_stats['oldest'] - newest = message_stats['newest'] - - self.assertNotEqual(oldest, newest) - - age = oldest['age'] - self.assertThat(age, matchers.GreaterThan(0)) - - # NOTE(kgriffs): Ensure 'soon' is different enough - # for the next comparison to work. - soon = timeutils.utcnow() + datetime.timedelta(seconds=60) - - for message_stat in (oldest, newest): - created_iso = message_stat['created'] - created = timeutils.parse_isotime(created_iso) - self.assertThat(timeutils.normalize_time(created), - matchers.LessThan(soon)) - - self.assertIn('id', message_stat) - - self.assertThat(oldest['created'], - matchers.LessThan(newest['created'])) - - def test_queue_count_on_claim_delete(self): - self.addCleanup(self.queue_controller.delete, 'test-queue', - project=self.project) - queue_name = 'test-queue' - client_uuid = uuid.uuid4() - - created = self.queue_controller.create(queue_name, - project=self.project) - self.assertTrue(created) - - # Create 15 messages. - msg_keys = _insert_fixtures(self.controller, queue_name, - project=self.project, - client_uuid=client_uuid, num=15) - - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(15, stats['total']) - - metadata = {'ttl': 120, 'grace': 60} - # Claim 10 messages - claim_id, _ = self.claim_controller.create(queue_name, metadata, - self.project) - - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(10, stats['claimed']) - - # Delete one message and ensure stats are updated even - # though the claim itself has not been deleted.
- self.controller.delete(queue_name, msg_keys[0], - self.project, claim_id) - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(14, stats['total']) - self.assertEqual(9, stats['claimed']) - self.assertEqual(5, stats['free']) - - # Same thing but use bulk_delete interface - self.controller.bulk_delete(queue_name, msg_keys[1:3], - self.project) - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - self.assertEqual(12, stats['total']) - self.assertEqual(7, stats['claimed']) - self.assertEqual(5, stats['free']) - - # Delete the claim - self.claim_controller.delete(queue_name, claim_id, - self.project) - stats = self.queue_controller.stats(queue_name, - self.project)['messages'] - - self.assertEqual(0, stats['claimed']) - - def test_message_lifecycle(self): - queue_name = self.queue_name - - message = { - 'ttl': 60, - 'body': { - 'event': 'BackupStarted', - 'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce' - } - } - - # Test Message Creation - created = list(self.controller.post(queue_name, [message], - project=self.project, - client_uuid=uuid.uuid4())) - self.assertEqual(1, len(created)) - message_id = created[0] - - # Test Message Get - message_out = self.controller.get(queue_name, message_id, - project=self.project) - self.assertEqual({'id', 'body', 'ttl', 'age', 'claim_id'}, - set(message_out)) - self.assertEqual(message_id, message_out['id']) - self.assertEqual(message['body'], message_out['body']) - self.assertEqual(message['ttl'], message_out['ttl']) - - # Test Message Deletion - self.controller.delete(queue_name, message_id, project=self.project) - - # Test does not exist - with testing.expect(errors.DoesNotExist): - self.controller.get(queue_name, message_id, project=self.project) - - def test_get_multi(self): - client_uuid = uuid.uuid4() - - _insert_fixtures(self.controller, self.queue_name, - project=self.project, client_uuid=client_uuid, num=15) - - def load_messages(expected, *args, **kwargs): - interaction = self.controller.list(*args, **kwargs) - msgs = list(next(interaction)) - self.assertEqual(expected, len(msgs)) - return interaction - - # Test all messages, echo False and uuid - load_messages(0, self.queue_name, project=self.project, - client_uuid=client_uuid) - - # Test all messages and limit - load_messages(15, self.queue_name, project=self.project, limit=20, - echo=True) - - # Test default limit - load_messages(storage.DEFAULT_MESSAGES_PER_PAGE, - self.queue_name, project=self.project, echo=True) - - # Test all messages, echo True, and uuid - interaction = load_messages(10, self.queue_name, echo=True, - project=self.project, - client_uuid=client_uuid) - - # Test all messages, echo True, uuid and marker - load_messages(5, self.queue_name, echo=True, project=self.project, - marker=next(interaction), client_uuid=client_uuid) - - def test_multi_ids(self): - messages_in = [{'ttl': 120, 'body': 0}, {'ttl': 240, 'body': 1}] - ids = self.controller.post(self.queue_name, messages_in, - project=self.project, - client_uuid=uuid.uuid4()) - - messages_out = self.controller.bulk_get(self.queue_name, ids, - project=self.project) - - for idx, message in enumerate(messages_out): - self.assertEqual({'id', 'body', 'ttl', 'age', 'claim_id'}, - set(message)) - self.assertEqual(idx, message['body']) - - self.controller.bulk_delete(self.queue_name, ids, - project=self.project) - - with testing.expect(StopIteration): - result = self.controller.bulk_get(self.queue_name, ids, - project=self.project) - next(result) - - def 
test_claim_effects(self): - client_uuid = uuid.uuid4() - - _insert_fixtures(self.controller, self.queue_name, - project=self.project, client_uuid=client_uuid, num=12) - - def list_messages(include_claimed=None): - kwargs = { - 'project': self.project, - 'client_uuid': client_uuid, - 'echo': True, - } - - # Properly test default value - if include_claimed is not None: - kwargs['include_claimed'] = include_claimed - - interaction = self.controller.list(self.queue_name, **kwargs) - - messages = next(interaction) - return [msg['id'] for msg in messages] - - messages_before = list_messages(True) - - meta = {'ttl': 70, 'grace': 60} - another_cid, _ = self.claim_controller.create(self.queue_name, meta, - project=self.project) - - messages_after = list_messages(True) - self.assertEqual(messages_before, messages_after) - - messages_excluding_claimed = list_messages() - self.assertNotEqual(messages_before, messages_excluding_claimed) - self.assertEqual(2, len(messages_excluding_claimed)) - - cid, msgs = self.claim_controller.create(self.queue_name, meta, - project=self.project) - [msg1, msg2] = msgs - - # A wrong claim does not ensure the message deletion - with testing.expect(errors.NotPermitted): - self.controller.delete(self.queue_name, msg1['id'], - project=self.project, - claim=another_cid) - - # Make sure a message can be deleted with a claim - self.controller.delete(self.queue_name, msg1['id'], - project=self.project, - claim=cid) - - with testing.expect(errors.DoesNotExist): - self.controller.get(self.queue_name, msg1['id'], - project=self.project) - - # Make sure such a deletion is idempotent - self.controller.delete(self.queue_name, msg1['id'], - project=self.project, - claim=cid) - - # A non-existing claim does not ensure the message deletion - self.claim_controller.delete(self.queue_name, cid, - project=self.project) - - # NOTE(kgriffs) Message is no longer claimed, but try - # to delete it with the claim anyway. It should raise - # an error, because the client needs a hint that - # perhaps the claim expired before it got around to - # trying to delete the message, which means another - # worker could be processing this message now. - with testing.expect(errors.NotPermitted, errors.ClaimDoesNotExist): - self.controller.delete(self.queue_name, msg2['id'], - project=self.project, - claim=cid) - - @testing.is_slow(condition=lambda self: self.gc_interval > 1) - def test_expired_messages(self): - messages = [{'body': 3.14, 'ttl': 1}, {'body': 0.618, 'ttl': 600}] - client_uuid = uuid.uuid4() - - [msgid_expired, msgid] = self.controller.post(self.queue_name, - messages, - project=self.project, - client_uuid=client_uuid) - - # NOTE(kgriffs): Allow for automatic GC of claims, messages - for i in range(self.gc_interval): - time.sleep(1) - - # NOTE(kgriffs): Some drivers require a manual GC to be - # triggered to clean up claims and messages. 
- self.driver.gc() - - try: - self.controller.get(self.queue_name, msgid_expired, - project=self.project) - except errors.DoesNotExist: - break - else: - self.fail("Didn't remove the expired message") - - # Make sure expired messages are not returned when listing - interaction = self.controller.list(self.queue_name, - project=self.project) - - messages = list(next(interaction)) - self.assertEqual(1, len(messages)) - self.assertEqual(msgid, messages[0]['id']) - - stats = self.queue_controller.stats(self.queue_name, - project=self.project) - self.assertEqual(1, stats['messages']['free']) - - # Make sure expired messages are not returned when popping - messages = self.controller.pop(self.queue_name, - limit=10, - project=self.project) - self.assertEqual(1, len(messages)) - self.assertEqual(msgid, messages[0]['id']) - - def test_bad_id(self): - # NOTE(cpp-cabrera): A malformed ID should result in an empty - # query. Raising an exception for validating IDs makes the - # implementation more verbose instead of taking advantage of - # the Maybe/Optional protocol, particularly when dealing with - # bulk operations. - bad_message_id = 'xyz' - self.controller.delete(self.queue_name, - bad_message_id, - project=self.project) - - with testing.expect(errors.MessageDoesNotExist): - self.controller.get(self.queue_name, - bad_message_id, - project=self.project) - - def test_bad_claim_id(self): - [msgid] = self.controller.post(self.queue_name, - [{'body': {}, 'ttl': 10}], - project=self.project, - client_uuid=uuid.uuid4()) - - # NOTE(kgriffs): If the client has a typo or - # something, they will need a hint that the - # request was invalid. - # - # On the other hand, if they are actually - # probing for a vulnerability, telling them - # the claim they requested doesn't exist should - # be harmless. 
- with testing.expect(storage.errors.ClaimDoesNotExist): - bad_claim_id = '; DROP TABLE queues' - self.controller.delete(self.queue_name, - msgid, - project=self.project, - claim=bad_claim_id) - - def test_bad_marker(self): - bad_marker = 'xyz' - interaction = self.controller.list(self.queue_name, - project=self.project, - client_uuid=uuid.uuid4(), - marker=bad_marker) - messages = list(next(interaction)) - - self.assertEqual([], messages) - - def test_sort_for_first(self): - client_uuid = uuid.uuid4() - - [msgid_first] = self.controller.post(self.queue_name, - [{'body': {}, 'ttl': 120}], - project=self.project, - client_uuid=client_uuid) - - _insert_fixtures(self.controller, self.queue_name, - project=self.project, client_uuid=client_uuid, num=10) - - [msgid_last] = self.controller.post(self.queue_name, - [{'body': {}, 'ttl': 120}], - project=self.project, - client_uuid=client_uuid) - - msg_asc = self.controller.first(self.queue_name, - self.project, - 1) - self.assertEqual(msgid_first, msg_asc['id']) - - msg_desc = self.controller.first(self.queue_name, - self.project, - -1) - self.assertEqual(msgid_last, msg_desc['id']) - - def test_get_first_with_empty_queue_exception(self): - self.assertRaises(errors.QueueIsEmpty, - self.controller.first, - self.queue_name, project=self.project) - - def test_get_first_with_invalid_sort_option(self): - self.assertRaises(ValueError, - self.controller.first, - self.queue_name, sort=0, - project=self.project) - - def test_pop_message(self): - self.queue_controller.create(self.queue_name, project=self.project) - messages = [ - { - 'ttl': 60, - 'body': { - 'event': 'BackupStarted', - 'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce', - }, - }, - { - 'ttl': 60, - 'body': { - 'event': 'BackupStarted', - 'backupId': 'd378813c-3f0b-11e2-ad92-7823d2b0f3ce', - }, - }, - { - 'ttl': 60, - 'body': { - 'event': 'BackupStarted', - 'backupId': 'e378813c-3f0b-11e2-ad92-7823d2b0f3ce', - }, - }, - ] - - client_uuid = uuid.uuid1() - self.controller.post(self.queue_name, messages, client_uuid, - project=self.project) - - # Test Message Pop - popped_messages = self.controller.pop(self.queue_name, - limit=1, - project=self.project) - - self.assertEqual(1, len(popped_messages)) - - def test_message_period(self): - self.queue_controller.create(self.queue_name, project=self.project) - messages = [ - { - 'ttl': 60, - 'body': { - 'event.data': 'BackupStarted', - 'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce', - }, - }, - ] - - client_uuid = uuid.uuid1() - self.controller.post(self.queue_name, messages, client_uuid, - project=self.project) - - stored_messages = self.controller.list(self.queue_name, - project=self.project) - - self.assertItemsEqual(['event.data', 'backupId'], - list(next(stored_messages))[0]['body'].keys()) - - def test_delete_message_from_nonexistent_queue(self): - queue_name = 'fake_name' - message_id = 'fake_id' - res = self.controller.delete(queue_name, message_id, - project=self.project) - self.assertIsNone(res) - - def test_delete_messages_with_ids_from__nonexistent_queue(self): - queue_name = 'fake_name' - message_ids = ['fake_id1', 'fake_id2'] - res = self.controller.bulk_delete(queue_name, message_ids, - project=self.project) - self.assertIsNone(res) - - def test_get_messages_with_ids_from__nonexistent_queue(self): - queue_name = 'fake_name' - message_ids = ['fake_id1', 'fake_id2'] - res = self.controller.bulk_get(queue_name, message_ids, - project=self.project) - - self.assertIsInstance(res, collections.Iterable) - self.assertEqual([], list(res)) - - 
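Both controller test classes above and below lean on an _insert_fixtures() helper defined elsewhere in this module. A sketch consistent with its call sites follows; the signature is inferred from usage such as _insert_fixtures(..., num=10, ttl=120), and the message body format is a guess:

def _insert_fixtures(controller, queue_name, project=None,
                     client_uuid=None, num=4, ttl=120):
    # Post `num` messages and hand back their ids, as call sites like
    # `msg_keys = _insert_fixtures(...)` expect.
    def messages():
        for n in range(num):
            yield {'ttl': ttl, 'body': {'event': 'Event number {0}'.format(n)}}

    return list(controller.post(queue_name, messages(), project=project,
                                client_uuid=client_uuid))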
-class ClaimControllerTest(ControllerBaseTest): - """Claim Controller base tests. - - NOTE(flaper87): Implementations of this class should - override the tearDown method in order - to clean up storage's state. - """ - queue_name = 'test_queue' - controller_base_class = storage.Claim - - def setUp(self): - super(ClaimControllerTest, self).setUp() - - # Lets create a queue - self.queue_controller = self.pipeline.queue_controller - self.message_controller = self.pipeline.message_controller - self.queue_controller.create(self.queue_name, project=self.project) - - def tearDown(self): - self.queue_controller.delete(self.queue_name, project=self.project) - super(ClaimControllerTest, self).tearDown() - - def test_claim_lifecycle(self): - _insert_fixtures(self.message_controller, self.queue_name, - project=self.project, client_uuid=uuid.uuid4(), - num=20) - - meta = {'ttl': 70, 'grace': 30} - - # Make sure create works - claim_id, messages = self.controller.create(self.queue_name, meta, - project=self.project, - limit=15) - - messages = list(messages) - self.assertEqual(15, len(messages)) - - # Ensure Queue stats - countof = self.queue_controller.stats(self.queue_name, - project=self.project) - self.assertEqual(15, countof['messages']['claimed']) - self.assertEqual(5, countof['messages']['free']) - self.assertEqual(20, countof['messages']['total']) - - # Make sure get works - claim, messages2 = self.controller.get(self.queue_name, claim_id, - project=self.project) - - messages2 = list(messages2) - self.assertEqual(15, len(messages2)) - for msg1, msg2 in zip(messages, messages2): - self.assertEqual(msg1['body'], msg2['body']) - self.assertEqual(msg1['claim_id'], msg2['claim_id']) - self.assertEqual(msg1['id'], msg2['id']) - self.assertEqual(msg1['ttl'], msg2['ttl']) - self.assertEqual(70, claim['ttl']) - self.assertEqual(claim_id, claim['id']) - - new_meta = {'ttl': 100, 'grace': 60} - self.controller.update(self.queue_name, claim_id, - new_meta, project=self.project) - - # Make sure update works - claim, messages2 = self.controller.get(self.queue_name, claim_id, - project=self.project) - - messages2 = list(messages2) - self.assertEqual(15, len(messages2)) - - # TODO(zyuan): Add some tests to ensure the ttl is - # extended/not-extended. 
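    # A minimal, hypothetical helper consistent with the extend-lifetime
    # assertions in the tests that follow: claiming raises a message's TTL
    # to claim_ttl + grace when that exceeds the message's current TTL,
    # and never shortens it.
    def claimed_message_ttl(message_ttl, claim_ttl, grace):
        return max(message_ttl, claim_ttl + grace)

    assert claimed_message_ttl(120, 777, 0) == 777    # test_extend_lifetime
    assert claimed_message_ttl(120, 777, 23) == 800   # ..._with_grace_1
    assert claimed_message_ttl(120, 121, 22) == 143   # ..._with_grace_2
    assert claimed_message_ttl(120, 60, 30) == 120    # test_do_not_extend_lifetime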
- for msg1, msg2 in zip(messages, messages2): - self.assertEqual(msg1['body'], msg2['body']) - - self.assertEqual(new_meta['ttl'], claim['ttl']) - self.assertEqual(claim_id, claim['id']) - - # Make sure delete works - self.controller.delete(self.queue_name, claim_id, - project=self.project) - - self.assertRaises(errors.ClaimDoesNotExist, - self.controller.get, self.queue_name, - claim_id, project=self.project) - - def test_claim_create_default_limit_multi(self): - num_claims = 5 - num_messages = storage.DEFAULT_MESSAGES_PER_CLAIM * num_claims - - # NOTE(kgriffs): + 1 on num_messages to check for off-by-one error - _insert_fixtures(self.message_controller, self.queue_name, - project=self.project, client_uuid=uuid.uuid4(), - num=num_messages + 1) - - meta = {'ttl': 70, 'grace': 30} - total_claimed = 0 - - for _ in range(num_claims): - claim_id, messages = self.controller.create( - self.queue_name, meta, project=self.project) - - messages = list(messages) - num_claimed = len(messages) - self.assertEqual(storage.DEFAULT_MESSAGES_PER_CLAIM, - num_claimed) - - total_claimed += num_claimed - - self.assertEqual(num_messages, total_claimed) - - def test_extend_lifetime(self): - _insert_fixtures(self.message_controller, self.queue_name, - project=self.project, client_uuid=uuid.uuid4(), - num=20, ttl=120) - - meta = {'ttl': 777, 'grace': 0} - - claim_id, messages = self.controller.create(self.queue_name, meta, - project=self.project) - - for message in messages: - self.assertEqual(777, message['ttl']) - - def test_extend_lifetime_with_grace_1(self): - _insert_fixtures(self.message_controller, self.queue_name, - project=self.project, client_uuid=uuid.uuid4(), - num=20, ttl=120) - - meta = {'ttl': 777, 'grace': 23} - - claim_id, messages = self.controller.create(self.queue_name, meta, - project=self.project) - - for message in messages: - self.assertEqual(800, message['ttl']) - - def test_extend_lifetime_with_grace_2(self): - _insert_fixtures(self.message_controller, self.queue_name, - project=self.project, client_uuid=uuid.uuid4(), - num=20, ttl=120) - - meta = {'ttl': 121, 'grace': 22} - - claim_id, messages = self.controller.create(self.queue_name, meta, - project=self.project) - - for message in messages: - self.assertEqual(143, message['ttl']) - - def test_do_not_extend_lifetime(self): - _insert_fixtures(self.message_controller, self.queue_name, - project=self.project, client_uuid=uuid.uuid4(), - num=20, ttl=120) - - # Choose a ttl that is less than the message's current TTL - meta = {'ttl': 60, 'grace': 30} - - claim_id, messages = self.controller.create(self.queue_name, meta, - project=self.project) - - for message in messages: - self.assertEqual(120, message['ttl']) - - def test_expired_claim(self): - meta = {'ttl': 1, 'grace': 60} - - claim_id, messages = self.controller.create(self.queue_name, meta, - project=self.project) - time.sleep(1) - - with testing.expect(errors.DoesNotExist): - self.controller.get(self.queue_name, claim_id, - project=self.project) - - with testing.expect(errors.DoesNotExist): - self.controller.update(self.queue_name, claim_id, - meta, project=self.project) - - def test_delete_message_expired_claim(self): - meta = {'ttl': 2, 'grace': 2} - new_messages = [{'ttl': 60, 'body': {}}, - {'ttl': 60, 'body': {}}, - {'ttl': 60, 'body': {}}] - - self.message_controller.post(self.queue_name, new_messages, - client_uuid=str(uuid.uuid1()), - project=self.project) - - claim_id, messages = self.controller.create(self.queue_name, meta, - project=self.project) - - now = 
timeutils.utcnow_ts()
-        timeutils_utcnow = 'oslo_utils.timeutils.utcnow_ts'
-
-        with mock.patch(timeutils_utcnow) as mock_utcnow:
-            mock_utcnow.return_value = now + 2
-
-            messages = [msg['id'] for msg in messages]
-            self.message_controller.delete(self.queue_name,
-                                           messages.pop(),
-                                           project=self.project)
-
-            self.message_controller.bulk_delete(self.queue_name,
-                                                messages,
-                                                project=self.project)
-
-    def test_illformed_id(self):
-        # Any ill-formed IDs should be regarded as non-existing ones.
-
-        self.controller.delete(self.queue_name,
-                               'illformed',
-                               project=self.project)
-
-        with testing.expect(errors.DoesNotExist):
-            self.controller.get(self.queue_name,
-                                'illformed',
-                                project=self.project)
-
-        with testing.expect(errors.DoesNotExist):
-            self.controller.update(self.queue_name,
-                                   'illformed',
-                                   {'ttl': 40},
-                                   project=self.project)
-
-    def test_dead_letter_queue(self):
-        DLQ_name = "DLQ"
-        meta = {'ttl': 3, 'grace': 3}
-        self.queue_controller.create(DLQ_name, project=self.project)
-        # Set dead letter queue metadata
-        metadata = {"_max_claim_count": 2,
-                    "_dead_letter_queue": DLQ_name,
-                    "_dead_letter_queue_messages_ttl": 9999}
-        self.queue_controller.set_metadata(self.queue_name,
-                                           metadata,
-                                           project=self.project)
-
-        new_messages = [{'ttl': 3600, 'body': {"key": "value"}}]
-
-        self.message_controller.post(self.queue_name, new_messages,
-                                     client_uuid=str(uuid.uuid1()),
-                                     project=self.project)
-
-        claim_id, messages = self.controller.create(self.queue_name, meta,
-                                                    project=self.project)
-        self.assertIsNotNone(claim_id)
-        self.assertEqual(1, len(list(messages)))
-        time.sleep(5)
-        claim_id, messages = self.controller.create(self.queue_name, meta,
-                                                    project=self.project)
-        self.assertIsNotNone(claim_id)
-        messages = list(messages)
-        self.assertEqual(1, len(messages))
-        time.sleep(5)
-        claim_id, messages = self.controller.create(self.queue_name, meta,
-                                                    project=self.project)
-        self.assertIsNone(claim_id)
-        self.assertEqual(0, len(list(messages)))
-
-        DLQ_messages = self.message_controller.list(DLQ_name,
-                                                    project=self.project,
-                                                    include_claimed=True)
-        expected_msg = list(next(DLQ_messages))[0]
-        self.assertEqual(9999, expected_msg["ttl"])
-        self.assertEqual({"key": "value"}, expected_msg["body"])
-
-
-@ddt.ddt
-class SubscriptionControllerTest(ControllerBaseTest):
-    """Subscriptions Controller base tests.
-
-    """
-    queue_name = 'test_queue'
-    controller_base_class = storage.Subscription
-
-    def setUp(self):
-        super(SubscriptionControllerTest, self).setUp()
-        self.subscription_controller = self.driver.subscription_controller
-        self.queue_controller = self.driver.queue_controller
-
-        self.source = self.queue_name
-        self.subscriber = 'http://trigger.me'
-        self.ttl = 600
-        self.options = {'uri': 'http://fake.com'}
-
-    def tearDown(self):
-        self.queue_controller.delete(self.queue_name, project=self.project)
-        super(SubscriptionControllerTest, self).tearDown()
-
-    # NOTE(Eva-i): this method helps to test cases when the queue is
-    # pre-created and when it's not.
- def _precreate_queue(self, precreate_queue): - if precreate_queue: - # Let's create a queue as the source of subscription - self.queue_controller.create(self.queue_name, project=self.project) - - @ddt.data(True, False) - def test_list(self, precreate_queue): - self._precreate_queue(precreate_queue) - for s in six.moves.xrange(15): - subscriber = 'http://fake_{0}'.format(s) - s_id = self.subscription_controller.create( - self.source, - subscriber, - self.ttl, - self.options, - project=self.project) - self.addCleanup(self.subscription_controller.delete, self.source, - s_id, self.project) - - added_age = 1 - time.sleep(added_age) - interaction = self.subscription_controller.list(self.source, - project=self.project) - subscriptions = list(next(interaction)) - - self.assertTrue(all(map(lambda s: - 'source' in s and 'subscriber' in s, - subscriptions))) - self.assertEqual(10, len(subscriptions)) - self.assertLessEqual(added_age, math.ceil(subscriptions[2]['age'])) - - interaction = (self.subscription_controller.list(self.source, - project=self.project, - marker=next(interaction))) - subscriptions = list(next(interaction)) - - self.assertTrue(all(map(lambda s: - 'source' in s and 'subscriber' in s, - subscriptions))) - self.assertEqual(5, len(subscriptions)) - - def test_small_list(self): - subscriber = 'http://fake' - s_id = self.subscription_controller.create( - self.source, - subscriber, - self.ttl, - self.options, - project=self.project) - self.addCleanup(self.subscription_controller.delete, self.source, - s_id, self.project) - - interaction = self.subscription_controller.list(self.source, - project=self.project) - subscriptions = list(next(interaction)) - marker = next(interaction) - - self.assertEqual(1, len(subscriptions)) - - interaction = (self.subscription_controller.list(self.source, - project=self.project, - marker=marker)) - subscriptions = list(next(interaction)) - - self.assertEqual([], subscriptions) - - @ddt.data(True, False) - def test_get_raises_if_subscription_does_not_exist(self, precreate_queue): - self._precreate_queue(precreate_queue) - self.assertRaises(errors.SubscriptionDoesNotExist, - self.subscription_controller.get, - self.queue_name, - 'notexists', - project=self.project) - - @ddt.data(True, False) - def test_lifecycle(self, precreate_queue): - self._precreate_queue(precreate_queue) - s_id = self.subscription_controller.create(self.source, - self.subscriber, - self.ttl, - self.options, - project=self.project) - added_age = 2 - time.sleep(added_age) - subscription = self.subscription_controller.get(self.queue_name, - s_id, - self.project) - - self.assertEqual(self.source, subscription['source']) - self.assertEqual(self.subscriber, subscription['subscriber']) - self.assertEqual(self.ttl, subscription['ttl']) - self.assertEqual(self.options, subscription['options']) - self.assertLessEqual(added_age, math.ceil(subscription['age'])) - - exist = self.subscription_controller.exists(self.queue_name, - s_id, - self.project) - - self.assertTrue(exist) - - self.subscription_controller.update(self.queue_name, - s_id, - project=self.project, - subscriber='http://a.com', - options={'funny': 'no'} - ) - - updated = self.subscription_controller.get(self.queue_name, - s_id, - self.project) - - self.assertEqual('http://a.com', updated['subscriber']) - self.assertEqual({'funny': 'no'}, updated['options']) - - self.subscription_controller.delete(self.queue_name, - s_id, project=self.project) - self.assertRaises(errors.SubscriptionDoesNotExist, - self.subscription_controller.get, - 
self.queue_name, s_id) - - @ddt.data(True, False) - def test_create_existed(self, precreate_queue): - self._precreate_queue(precreate_queue) - s_id = self.subscription_controller.create( - self.source, - self.subscriber, - self.ttl, - self.options, - project=self.project) - self.addCleanup(self.subscription_controller.delete, self.source, s_id, - self.project) - self.assertIsNotNone(s_id) - - s_id = self.subscription_controller.create(self.source, - self.subscriber, - self.ttl, - self.options, - project=self.project) - self.assertIsNone(s_id) - - def test_get_update_delete_on_non_existing_queue(self): - self._precreate_queue(precreate_queue=True) - s_id = self.subscription_controller.create( - self.source, - self.subscriber, - self.ttl, - self.options, - project=self.project) - self.addCleanup(self.subscription_controller.delete, self.source, s_id, - self.project) - self.assertIsNotNone(s_id) - non_existing_queue = "fake_name" - # get - self.assertRaises(errors.SubscriptionDoesNotExist, - self.subscription_controller.get, - non_existing_queue, s_id, project=self.project) - # update - body = { - "subscriber": self.subscriber, - "ttl": self.ttl, - "options": self.options - } - self.assertRaises(errors.SubscriptionDoesNotExist, - self.subscription_controller.update, - non_existing_queue, s_id, project=self.project, - **body) - # delete - self.subscription_controller.delete(non_existing_queue, s_id, - project=self.project) - s_id = self.subscription_controller.get(self.queue_name, s_id, - project=self.project) - self.assertIsNotNone(s_id) - - def test_nonexist_source(self): - try: - s_id = self.subscription_controller.create('fake_queue_name', - self.subscriber, - self.ttl, - self.options, - self.project) - except Exception: - self.fail("Subscription controller should not raise an exception " - "in case of non-existing queue.") - self.addCleanup(self.subscription_controller.delete, 'fake_queue_name', - s_id, self.project) - - @ddt.data(True, False) - def test_update_raises_if_try_to_update_to_existing_subscription( - self, - precreate_queue): - self._precreate_queue(precreate_queue) - # create two subscriptions: fake_0 and fake_1 - ids = [] - for s in six.moves.xrange(2): - subscriber = 'http://fake_{0}'.format(s) - s_id = self.subscription_controller.create( - self.source, - subscriber, - self.ttl, - self.options, - project=self.project) - self.addCleanup(self.subscription_controller.delete, self.source, - s_id, self.project) - ids.append(s_id) - # update fake_0 to fake_2, success - update_fields = { - 'subscriber': 'http://fake_2' - } - self.subscription_controller.update(self.queue_name, - ids[0], - project=self.project, - **update_fields) - # update fake_1 to fake_2, raise error - self.assertRaises(errors.SubscriptionAlreadyExists, - self.subscription_controller.update, - self.queue_name, - ids[1], - project=self.project, - **update_fields) - - @ddt.data(True, False) - def test_update_raises_if_subscription_does_not_exist(self, - precreate_queue): - self._precreate_queue(precreate_queue) - update_fields = { - 'subscriber': 'http://fake' - } - self.assertRaises(errors.SubscriptionDoesNotExist, - self.subscription_controller.update, - self.queue_name, - 'notexists', - project=self.project, - **update_fields) - - def test_confirm(self): - s_id = self.subscription_controller.create(self.source, - self.subscriber, - self.ttl, - self.options, - project=self.project) - self.addCleanup(self.subscription_controller.delete, self.source, - s_id, self.project) - subscription = 
self.subscription_controller.get(self.source, s_id, - project=self.project) - - self.assertFalse(subscription['confirmed']) - - self.subscription_controller.confirm(self.source, s_id, - project=self.project, - confirmed=True) - subscription = self.subscription_controller.get(self.source, s_id, - project=self.project) - - self.assertTrue(subscription['confirmed']) - - def test_confirm_with_nonexist_subscription(self): - s_id = 'fake-id' - self.assertRaises(errors.SubscriptionDoesNotExist, - self.subscription_controller.confirm, - self.source, s_id, project=self.project, - confirmed=True - ) - - -class PoolsControllerTest(ControllerBaseTest): - """Pools Controller base tests. - - NOTE(flaper87): Implementations of this class should - override the tearDown method in order - to clean up storage's state. - """ - controller_base_class = storage.PoolsBase - - def setUp(self): - super(PoolsControllerTest, self).setUp() - self.pools_controller = self.driver.pools_controller - - # Let's create one pool - self.pool = str(uuid.uuid1()) - self.pool_group = str(uuid.uuid1()) - self.pools_controller.create(self.pool, 100, 'localhost', - group=self.pool_group, options={}) - - self.flavors_controller = self.driver.flavors_controller - - def tearDown(self): - self.pools_controller.drop_all() - super(PoolsControllerTest, self).tearDown() - - def test_create_succeeds(self): - self.pools_controller.create(str(uuid.uuid1()), - 100, 'localhost:13124', - options={}) - - def test_create_replaces_on_duplicate_insert(self): - name = str(uuid.uuid1()) - self.pools_controller.create(name, - 100, 'localhost:76553', - options={}) - self.pools_controller.create(name, - 111, 'localhost:758353', - options={}) - entry = self.pools_controller.get(name) - self._pool_expects(entry, xname=name, xweight=111, - xlocation='localhost:758353') - - def _pool_expects(self, pool, xname, xweight, xlocation): - self.assertIn('name', pool) - self.assertEqual(xname, pool['name']) - self.assertIn('weight', pool) - self.assertEqual(xweight, pool['weight']) - self.assertIn('uri', pool) - self.assertEqual(xlocation, pool['uri']) - - def test_get_returns_expected_content(self): - res = self.pools_controller.get(self.pool) - self._pool_expects(res, self.pool, 100, 'localhost') - self.assertNotIn('options', res) - - def test_detailed_get_returns_expected_content(self): - res = self.pools_controller.get(self.pool, detailed=True) - self.assertIn('options', res) - self.assertEqual({}, res['options']) - - def test_get_raises_if_not_found(self): - self.assertRaises(errors.PoolDoesNotExist, - self.pools_controller.get, 'notexists') - - def test_exists(self): - self.assertTrue(self.pools_controller.exists(self.pool)) - self.assertFalse(self.pools_controller.exists('notexists')) - - def test_update_raises_assertion_error_on_bad_fields(self): - self.assertRaises(AssertionError, self.pools_controller.update, - self.pool) - - def test_update_works(self): - # NOTE(flaper87): This may fail for redis. Create - # a dummy store for tests. 
- self.pools_controller.update(self.pool, weight=101, - uri='localhost3', - options={'a': 1}) - res = self.pools_controller.get(self.pool, detailed=True) - self._pool_expects(res, self.pool, 101, 'localhost3') - self.assertEqual({'a': 1}, res['options']) - - def test_delete_works(self): - self.pools_controller.delete(self.pool) - self.assertFalse(self.pools_controller.exists(self.pool)) - - def test_delete_nonexistent_is_silent(self): - self.pools_controller.delete('nonexisting') - - def test_drop_all_leads_to_empty_listing(self): - self.pools_controller.drop_all() - cursor = self.pools_controller.list() - pools = next(cursor) - self.assertRaises(StopIteration, next, pools) - - def test_listing_simple(self): - # NOTE(cpp-cabrera): base entry interferes with listing results - self.pools_controller.delete(self.pool) - - pools = [] - marker = '' - for i in range(15): - n = str(uuid.uuid4()) - w = random.randint(1, 100) - pools.append({'n': n, 'w': w, 'u': str(i)}) - - # Keep the max name as marker - if n > marker: - marker = n - - self.pools_controller.create(n, w, str(i), options={}) - - # Get the target pool - def _pool(name): - pool = [p for p in pools if p['n'] == name] - self.assertEqual(1, len(pool)) - - pool = pool[0] - n = pool['n'] - w = pool['w'] - u = pool['u'] - - return n, w, u - - def get_res(**kwargs): - cursor = self.pools_controller.list(**kwargs) - res = list(next(cursor)) - marker = next(cursor) - # TODO(jeffrey4l): marker should exist - self.assertTrue(marker) - return res - - res = get_res() - self.assertEqual(10, len(res)) - for entry in res: - n, w, u = _pool(entry['name']) - - self._pool_expects(entry, n, w, u) - self.assertNotIn('options', entry) - - res = get_res(limit=5) - self.assertEqual(5, len(res)) - - res = get_res(limit=0) - self.assertEqual(15, len(res)) - - next_name = marker + 'n' - self.pools_controller.create(next_name, 123, '123', options={}) - res = get_res(marker=marker) - self._pool_expects(res[0], next_name, 123, '123') - self.pools_controller.delete(next_name) - - res = get_res(detailed=True) - self.assertEqual(10, len(res)) - for entry in res: - n, w, u = _pool(entry['name']) - - self._pool_expects(entry, n, w, u) - self.assertIn('options', entry) - self.assertEqual({}, entry['options']) - - def test_mismatching_capabilities(self): - # NOTE(flaper87): This may fail for redis. Create - # a dummy store for tests. 
- with testing.expect(errors.PoolCapabilitiesMismatch): - self.pools_controller.create(str(uuid.uuid1()), - 100, 'redis://localhost', - group=self.pool_group, - options={}) - - -class CatalogueControllerTest(ControllerBaseTest): - controller_base_class = storage.CatalogueBase - - def setUp(self): - super(CatalogueControllerTest, self).setUp() - self.controller = self.driver.catalogue_controller - self.pool_ctrl = self.driver.pools_controller - self.queue = six.text_type(uuid.uuid4()) - self.project = six.text_type(uuid.uuid4()) - - self.pool = str(uuid.uuid1()) - self.pool_group = str(uuid.uuid1()) - self.pool_ctrl.create(self.pool, 100, 'localhost', - group=self.pool_group, options={}) - self.addCleanup(self.pool_ctrl.delete, self.pool) - - def tearDown(self): - self.controller.drop_all() - super(CatalogueControllerTest, self).tearDown() - - def _check_structure(self, entry): - self.assertIn('queue', entry) - self.assertIn('project', entry) - self.assertIn('pool', entry) - self.assertIsInstance(entry['queue'], six.text_type) - self.assertIsInstance(entry['project'], six.text_type) - self.assertIsInstance(entry['pool'], six.text_type) - - def _check_value(self, entry, xqueue, xproject, xpool): - self.assertEqual(xqueue, entry['queue']) - self.assertEqual(xproject, entry['project']) - self.assertEqual(xpool, entry['pool']) - - def test_catalogue_entry_life_cycle(self): - queue = self.queue - project = self.project - - # check listing is initially empty - for p in self.controller.list(project): - self.fail('There should be no entries at this time') - - # create a listing, check its length - with helpers.pool_entries(self.controller, - self.pool_ctrl, 10) as expect: - project = expect[0][0] - xs = list(self.controller.list(project)) - self.assertEqual(10, len(xs)) - - # create, check existence, delete - with helpers.pool_entry(self.controller, project, queue, self.pool): - self.assertTrue(self.controller.exists(project, queue)) - - # verify it no longer exists - self.assertFalse(self.controller.exists(project, queue)) - - # verify it isn't listable - self.assertEqual(0, len(list(self.controller.list(project)))) - - def test_list(self): - with helpers.pool_entries(self.controller, - self.pool_ctrl, 10) as expect: - values = zip(self.controller.list(u'_'), expect) - for e, x in values: - p, q, s = x - self._check_structure(e) - self._check_value(e, xqueue=q, xproject=p, xpool=s) - - def test_update(self): - p2 = u'b' - self.pool_ctrl.create(p2, 100, '127.0.0.1', - group=self.pool_group, - options={}) - self.addCleanup(self.pool_ctrl.delete, p2) - - with helpers.pool_entry(self.controller, self.project, - self.queue, self.pool) as expect: - p, q, s = expect - self.controller.update(p, q, pool=p2) - entry = self.controller.get(p, q) - self._check_value(entry, xqueue=q, xproject=p, xpool=p2) - - def test_update_raises_when_entry_does_not_exist(self): - e = self.assertRaises(errors.QueueNotMapped, - self.controller.update, - 'p', 'q', 'a') - self.assertIn('queue q for project p', str(e)) - - def test_get(self): - with helpers.pool_entry(self.controller, - self.project, - self.queue, self.pool) as expect: - p, q, s = expect - e = self.controller.get(p, q) - self._check_value(e, xqueue=q, xproject=p, xpool=s) - - def test_get_raises_if_does_not_exist(self): - with helpers.pool_entry(self.controller, - self.project, - self.queue, u'a') as expect: - p, q, _ = expect - self.assertRaises(errors.QueueNotMapped, - self.controller.get, - p, 'non_existing') - self.assertRaises(errors.QueueNotMapped, - 
self.controller.get, - 'non_existing', q) - self.assertRaises(errors.QueueNotMapped, - self.controller.get, - 'non_existing', 'non_existing') - - def test_exists(self): - with helpers.pool_entry(self.controller, - self.project, - self.queue, self.pool) as expect: - p, q, _ = expect - self.assertTrue(self.controller.exists(p, q)) - self.assertFalse(self.controller.exists('nada', 'not_here')) - - def test_insert(self): - q1 = six.text_type(uuid.uuid1()) - q2 = six.text_type(uuid.uuid1()) - self.controller.insert(self.project, q1, u'a') - self.controller.insert(self.project, q2, u'a') - - -class FlavorsControllerTest(ControllerBaseTest): - """Flavors Controller base tests. - - NOTE(flaper87): Implementations of this class should - override the tearDown method in order - to clean up storage's state. - """ - controller_base_class = storage.FlavorsBase - - def setUp(self): - super(FlavorsControllerTest, self).setUp() - self.pools_controller = self.driver.pools_controller - self.flavors_controller = self.driver.flavors_controller - - # Let's create one pool - self.pool = str(uuid.uuid1()) - self.pool_group = str(uuid.uuid1()) - self.pools_controller.create(self.pool, 100, 'localhost', - group=self.pool_group, options={}) - self.addCleanup(self.pools_controller.delete, self.pool) - - def tearDown(self): - self.flavors_controller.drop_all() - super(FlavorsControllerTest, self).tearDown() - - def test_create_succeeds(self): - self.flavors_controller.create('durable', self.pool_group, - project=self.project, - capabilities={}) - - def _flavors_expects(self, flavor, xname, xproject, xpool): - self.assertIn('name', flavor) - self.assertEqual(xname, flavor['name']) - self.assertNotIn('project', flavor) - self.assertIn('pool_group', flavor) - self.assertEqual(xpool, flavor['pool_group']) - - def test_create_replaces_on_duplicate_insert(self): - name = str(uuid.uuid1()) - self.flavors_controller.create(name, self.pool_group, - project=self.project, - capabilities={}) - - pool2 = 'another_pool' - self.pools_controller.create(pool2, 100, 'localhost:27017', - group=pool2, options={}) - self.addCleanup(self.pools_controller.delete, pool2) - - self.flavors_controller.create(name, pool2, - project=self.project, - capabilities={}) - entry = self.flavors_controller.get(name, project=self.project) - self._flavors_expects(entry, name, self.project, pool2) - - def test_get_returns_expected_content(self): - name = 'durable' - capabilities = {'fifo': True} - self.flavors_controller.create(name, self.pool_group, - project=self.project, - capabilities=capabilities) - res = self.flavors_controller.get(name, project=self.project) - self._flavors_expects(res, name, self.project, self.pool_group) - self.assertNotIn('capabilities', res) - - def test_detailed_get_returns_expected_content(self): - name = 'durable' - capabilities = {'fifo': True} - self.flavors_controller.create(name, self.pool_group, - project=self.project, - capabilities=capabilities) - res = self.flavors_controller.get(name, project=self.project, - detailed=True) - self._flavors_expects(res, name, self.project, self.pool_group) - self.assertIn('capabilities', res) - self.assertEqual(capabilities, res['capabilities']) - - def test_get_raises_if_not_found(self): - self.assertRaises(errors.FlavorDoesNotExist, - self.flavors_controller.get, 'notexists') - - def test_exists(self): - self.flavors_controller.create('exists', self.pool_group, - project=self.project, - capabilities={}) - self.assertTrue(self.flavors_controller.exists('exists', - 
project=self.project)) - self.assertFalse(self.flavors_controller.exists('notexists', - project=self.project)) - - def test_update_raises_assertion_error_on_bad_fields(self): - self.assertRaises(AssertionError, self.pools_controller.update, - self.pool_group) - - def test_update_works(self): - name = 'yummy' - self.flavors_controller.create(name, self.pool_group, - project=self.project, - capabilities={}) - - res = self.flavors_controller.get(name, project=self.project, - detailed=True) - - p = 'olympic' - pool_group = 'sports' - self.pools_controller.create(p, 100, 'localhost2', - group=pool_group, options={}) - self.addCleanup(self.pools_controller.delete, p) - - new_capabilities = {'fifo': False} - self.flavors_controller.update(name, project=self.project, - pool_group=pool_group, - capabilities={'fifo': False}) - res = self.flavors_controller.get(name, project=self.project, - detailed=True) - self._flavors_expects(res, name, self.project, pool_group) - self.assertEqual(new_capabilities, res['capabilities']) - - def test_delete_works(self): - name = 'puke' - self.flavors_controller.create(name, self.pool_group, - project=self.project, - capabilities={}) - self.flavors_controller.delete(name, project=self.project) - self.assertFalse(self.flavors_controller.exists(name)) - - def test_delete_nonexistent_is_silent(self): - self.flavors_controller.delete('nonexisting') - - def test_drop_all_leads_to_empty_listing(self): - self.flavors_controller.drop_all() - cursor = self.flavors_controller.list() - flavors = next(cursor) - self.assertRaises(StopIteration, next, flavors) - self.assertFalse(next(cursor)) - - def test_listing_simple(self): - name_gen = lambda i: chr(ord('A') + i) - for i in range(15): - pool = str(i) - pool_group = pool - uri = 'localhost:2701' + pool - self.pools_controller.create(pool, 100, uri, - group=pool_group, options={}) - self.addCleanup(self.pools_controller.delete, pool) - - self.flavors_controller.create(name_gen(i), project=self.project, - pool_group=pool_group, - capabilities={}) - - def get_res(**kwargs): - cursor = self.flavors_controller.list(project=self.project, - **kwargs) - res = list(next(cursor)) - marker = next(cursor) - self.assertTrue(marker) - return res - - res = get_res() - self.assertEqual(10, len(res)) - for i, entry in enumerate(res): - self._flavors_expects(entry, name_gen(i), self.project, str(i)) - self.assertNotIn('capabilities', entry) - - res = get_res(limit=5) - self.assertEqual(5, len(res)) - - res = get_res(marker=name_gen(3)) - self._flavors_expects(res[0], name_gen(4), self.project, '4') - - res = get_res(detailed=True) - self.assertEqual(10, len(res)) - for i, entry in enumerate(res): - self._flavors_expects(entry, name_gen(i), self.project, str(i)) - self.assertIn('capabilities', entry) - self.assertEqual({}, entry['capabilities']) - - -def _insert_fixtures(controller, queue_name, project=None, - client_uuid=None, num=4, ttl=120): - def messages(): - for n in six.moves.xrange(num): - yield { - 'ttl': ttl, - 'body': { - 'event': 'Event number {0}'.format(n) - }} - - return controller.post(queue_name, messages(), - project=project, client_uuid=client_uuid) diff --git a/zaqar/tests/unit/storage/sqlalchemy_migration/__init__.py b/zaqar/tests/unit/storage/sqlalchemy_migration/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/storage/sqlalchemy_migration/test_db_manage_cli.py b/zaqar/tests/unit/storage/sqlalchemy_migration/test_db_manage_cli.py deleted file mode 100644 index e776f7ed..00000000 --- 
a/zaqar/tests/unit/storage/sqlalchemy_migration/test_db_manage_cli.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2012 New Dream Network, LLC (DreamHost) -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -import mock -import testscenarios -import testtools - -from zaqar.storage.sqlalchemy.migration import cli - - -class TestCli(testtools.TestCase): - func_name = '' - exp_args = () - exp_kwargs = {} - - scenarios = [ - ('stamp', - dict(argv=['prog', 'stamp', 'foo'], func_name='stamp', - exp_args=('foo',), exp_kwargs={'sql': False})), - ('stamp-sql', - dict(argv=['prog', 'stamp', 'foo', '--sql'], func_name='stamp', - exp_args=('foo',), exp_kwargs={'sql': True})), - ('current', - dict(argv=['prog', 'current'], func_name='current', - exp_args=[], exp_kwargs=dict())), - ('history', - dict(argv=['prog', 'history'], func_name='history', - exp_args=[], exp_kwargs=dict())), - ('check_migration', - dict(argv=['prog', 'check_migration'], func_name='branches', - exp_args=[], exp_kwargs=dict())), - ('sync_revision_autogenerate', - dict(argv=['prog', 'revision', '--autogenerate', '-m', 'message'], - func_name='revision', - exp_args=(), - exp_kwargs={ - 'message': 'message', 'sql': False, 'autogenerate': True})), - ('sync_revision_sql', - dict(argv=['prog', 'revision', '--sql', '-m', 'message'], - func_name='revision', - exp_args=(), - exp_kwargs={ - 'message': 'message', 'sql': True, 'autogenerate': False})), - ('upgrade-sql', - dict(argv=['prog', 'upgrade', '--sql', 'head'], - func_name='upgrade', - exp_args=('head',), - exp_kwargs={'sql': True})), - - ('upgrade-delta', - dict(argv=['prog', 'upgrade', '--delta', '3'], - func_name='upgrade', - exp_args=('+3',), - exp_kwargs={'sql': False})) - ] - - def setUp(self): - super(TestCli, self).setUp() - do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') - self.addCleanup(do_alembic_cmd_p.stop) - self.do_alembic_cmd = do_alembic_cmd_p.start() - self.addCleanup(cli.CONF.reset) - - def test_cli(self): - with mock.patch.object(sys, 'argv', self.argv): - cli.main() - self.do_alembic_cmd.assert_has_calls( - [mock.call( - mock.ANY, self.func_name, - *self.exp_args, **self.exp_kwargs)] - ) - - -def load_tests(loader, in_tests, pattern): - return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern) diff --git a/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations.py b/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations.py deleted file mode 100644 index aeccc3bf..00000000 --- a/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# Copyright 2014 Mirantis Inc -# Copyright 2016 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for database migrations. - -For the opportunistic testing you need to set up a db named 'openstack_citest' -with user 'openstack_citest' and password 'openstack_citest' on localhost. -The test will then use that db and u/p combo to run the tests. - -For postgres on Ubuntu this can be done with the following commands: - -sudo -u postgres psql -postgres=# create user openstack_citest with createdb login password - 'openstack_citest'; -postgres=# create database openstack_citest with owner openstack_citest; - -""" - -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils - -from zaqar.tests.unit.storage.sqlalchemy_migration import \ - test_migrations_base as base - - -class ZaqarMigrationsCheckers(object): - - def assertColumnExists(self, engine, table, column): - t = db_utils.get_table(engine, table) - self.assertIn(column, t.c) - - def assertColumnsExist(self, engine, table, columns): - for column in columns: - self.assertColumnExists(engine, table, column) - - def assertColumnType(self, engine, table, column, column_type): - t = db_utils.get_table(engine, table) - column_ref_type = str(t.c[column].type) - self.assertEqual(column_ref_type, column_type) - - def assertColumnCount(self, engine, table, columns): - t = db_utils.get_table(engine, table) - self.assertEqual(len(columns), len(t.columns)) - - def assertColumnNotExists(self, engine, table, column): - t = db_utils.get_table(engine, table) - self.assertNotIn(column, t.c) - - def assertIndexExists(self, engine, table, index): - t = db_utils.get_table(engine, table) - index_names = [idx.name for idx in t.indexes] - self.assertIn(index, index_names) - - def assertIndexMembers(self, engine, table, index, members): - self.assertIndexExists(engine, table, index) - - t = db_utils.get_table(engine, table) - index_columns = None - for idx in t.indexes: - if idx.name == index: - index_columns = idx.columns.keys() - break - - self.assertEqual(sorted(members), sorted(index_columns)) - - def test_walk_versions(self): - self.walk_versions(self.engine) - - def _pre_upgrade_001(self, engine): - # Anything returned from this method will be - # passed to corresponding _check_xxx method as 'data'. 
- pass
-
-    def _check_001(self, engine, data):
-        queues_columns = [
-            'id',
-            'name',
-            'project',
-            'metadata'
-        ]
-        self.assertColumnsExist(
-            engine, 'Queues', queues_columns)
-        self.assertColumnCount(
-            engine, 'Queues', queues_columns)
-
-        poolgroup_columns = [
-            'name',
-        ]
-        self.assertColumnsExist(
-            engine, 'PoolGroup', poolgroup_columns)
-        self.assertColumnCount(
-            engine, 'PoolGroup', poolgroup_columns)
-
-        pools_columns = [
-            'name',
-            'group',
-            'uri',
-            'weight',
-            'options',
-        ]
-        self.assertColumnsExist(
-            engine, 'Pools', pools_columns)
-        self.assertColumnCount(
-            engine, 'Pools', pools_columns)
-
-        flavors_columns = [
-            'name',
-            'project',
-            'pool_group',
-            'capabilities',
-        ]
-        self.assertColumnsExist(
-            engine, 'Flavors', flavors_columns)
-        self.assertColumnCount(
-            engine, 'Flavors', flavors_columns)
-
-        catalogue_columns = [
-            'pool',
-            'project',
-            'queue',
-        ]
-        self.assertColumnsExist(
-            engine, 'Catalogue', catalogue_columns)
-        self.assertColumnCount(
-            engine, 'Catalogue', catalogue_columns)
-
-        self._data_001(engine, data)
-
-    def _data_001(self, engine, data):
-        project = 'myproject'
-        t = db_utils.get_table(engine, 'Queues')
-        engine.execute(t.insert(), id='123', name='name', project='myproject',
-                       metadata={})
-        new_project = engine.execute(t.select()).fetchone().project
-        self.assertEqual(project, new_project)
-        engine.execute(t.delete())
-
-    def _check_002(self, engine, data):
-        # currently, 002 is just a placeholder
-        pass
-
-    def _check_003(self, engine, data):
-        # currently, 003 is just a placeholder
-        pass
-
-    def _check_004(self, engine, data):
-        # currently, 004 is just a placeholder
-        pass
-
-    def _check_005(self, engine, data):
-        # currently, 005 is just a placeholder
-        pass
-
-
-class TestMigrationsMySQL(ZaqarMigrationsCheckers,
-                          base.BaseWalkMigrationTestCase,
-                          base.TestModelsMigrationsSync,
-                          test_base.MySQLOpportunisticTestCase):
-    pass
diff --git a/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations_base.py b/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations_base.py
deleted file mode 100644
index a1b319ef..00000000
--- a/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations_base.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# Copyright 2012-2013 IBM Corp.
-# Copyright 2016 Catalyst IT Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-#
-# Ripped off from Nova's test_migrations.py
-# The only difference between Nova and this code is usage of alembic instead
-# of sqlalchemy migrations.
-#
-# There is ongoing work to extract similar code to the oslo incubator. Once
-# it is extracted we'll be able to remove this file and use oslo.
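The _alembic_command() helper below captures command output by pointing the alembic config's stdout attribute at an in-memory buffer. A self-contained sketch of just that capture pattern (the print call stands in for a real alembic command writing to config.stdout):

    from __future__ import print_function

    import io

    buf = io.StringIO()
    # In the helper below: self.ALEMBIC_CONFIG.stdout = buf, after which
    # getattr(command, name)(config, *args, **kwargs) writes into buf.
    print(u'current revision: abc123', file=buf)
    assert buf.getvalue().strip() == u'current revision: abc123'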
-
-import io
-import os
-
-import alembic
-from alembic import command
-from alembic import config as alembic_config
-from alembic import migration
-from alembic import script as alembic_script
-from oslo_config import cfg
-from oslo_db.sqlalchemy import test_migrations as t_m
-from oslo_log import log as logging
-
-import zaqar.storage.sqlalchemy.migration
-from zaqar.storage.sqlalchemy import tables
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-sqlalchemy_opts = [cfg.StrOpt('uri',
-                              help='The SQLAlchemy connection string to'
-                                   ' use to connect to the database.',
-                              secret=True)]
-
-CONF.register_opts(sqlalchemy_opts,
-                   group='drivers:management_store:sqlalchemy')
-
-
-class BaseWalkMigrationTestCase(object):
-
-    ALEMBIC_CONFIG = alembic_config.Config(
-        os.path.join(
-            os.path.dirname(zaqar.storage.sqlalchemy.migration.__file__),
-            'alembic.ini')
-    )
-
-    ALEMBIC_CONFIG.zaqar_config = CONF
-
-    def _configure(self, engine):
-        """For each type of repository we need some configuration steps.
-
-        For migrate_repo we should put our database under version control.
-        For alembic we should configure the database settings. For this we
-        use oslo_config and openstack.common.db.sqlalchemy.session with
-        database functionality (reset default settings and session cleanup).
-        """
-
-        CONF.set_override('uri', str(engine.url),
-                          group='drivers:management_store:sqlalchemy')
-
-    def _alembic_command(self, alembic_command, engine, *args, **kwargs):
-        """Most alembic commands write their results to stdout.
-
-        We redirect that output into a buffer here so it can be captured
-        and inspected.
-        """
-        self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
-        CONF.set_override('uri', str(engine.url),
-                          group='drivers:management_store:sqlalchemy')
-
-        getattr(command, alembic_command)(*args, **kwargs)
-        res = buf.getvalue().strip()
-        LOG.debug('Alembic command {command} returns: {result}'.format(
-            command=alembic_command, result=res))
-
-        return res
-
-    def _get_versions(self):
-        """Return an ordered list of migration versions.
-
-        Since alembic generates version identifiers randomly (whereas
-        sqlalchemy-migrate used ordered, autoincrementing names), we have
-        to build an explicit, ordered list of versions to upgrade through
-        when testing migrations in the "up" direction.
-        """
-
-        env = alembic_script.ScriptDirectory.from_config(self.ALEMBIC_CONFIG)
-        versions = []
-        for rev in env.walk_revisions():
-            versions.append(rev.revision)
-
-        versions.reverse()
-        return versions
-
-    def walk_versions(self, engine=None):
-        # Determine the latest version script from the repo, then
-        # upgrade from 1 through to the latest, with no data
-        # in the databases. This just checks that the schema itself
-        # upgrades successfully.
-
-        self._configure(engine)
-        versions = self._get_versions()
-        for ver in versions:
-            self._migrate_up(engine, ver, with_data=True)
-
-    def _get_version_from_db(self, engine):
-        """Returns latest version from db for each type of migrate repo."""
-
-        conn = engine.connect()
-        try:
-            context = migration.MigrationContext.configure(conn)
-            version = context.get_current_revision() or '-1'
-        finally:
-            conn.close()
-        return version
-
-    def _migrate(self, engine, version, cmd):
-        """Base method for manipulating the migrate repo.
-
-        It will upgrade or downgrade the actual database.
-        """
-
-        self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version)
-
-    def _migrate_up(self, engine, version, with_data=False):
-        """Migrate up to a new version of the db.
- - We allow for data insertion and post checks at every - migration version with special _pre_upgrade_### and - _check_### functions in the main test. - """ - # NOTE(sdague): try block is here because it's impossible to debug - # where a failed data migration happens otherwise - check_version = version - try: - if with_data: - data = None - pre_upgrade = getattr( - self, "_pre_upgrade_%s" % check_version, None) - if pre_upgrade: - data = pre_upgrade(engine) - self._migrate(engine, version, 'upgrade') - self.assertEqual(version, self._get_version_from_db(engine)) - if with_data: - check = getattr(self, "_check_%s" % check_version, None) - if check: - check(engine, data) - except Exception: - LOG.error("Failed to migrate to version {version} on engine " - "{engine}".format(version=version, engine=engine)) - raise - - -class TestModelsMigrationsSync(t_m.ModelsMigrationsSync): - """Class for comparison of DB migration scripts and models. - - Allows to check if the DB schema obtained by applying of migration - scripts is equal to the one produced from models definitions. - """ - mg_path = os.path.dirname(zaqar.storage.sqlalchemy.migration.__file__) - ALEMBIC_CONFIG = alembic_config.Config( - os.path.join(mg_path, 'alembic.ini') - ) - ALEMBIC_CONFIG.zaqar_config = CONF - - def get_engine(self): - return self.engine - - def db_sync(self, engine): - CONF.set_override('uri', str(engine.url), - group='drivers:management_store:sqlalchemy') - script_location = os.path.join(self.mg_path, 'alembic_migrations') - self.ALEMBIC_CONFIG.set_main_option('script_location', script_location) - alembic.command.upgrade(self.ALEMBIC_CONFIG, 'head') - - def get_metadata(self): - return tables.metadata diff --git a/zaqar/tests/unit/storage/test_impl_mongodb.py b/zaqar/tests/unit/storage/test_impl_mongodb.py deleted file mode 100644 index cb6ef5eb..00000000 --- a/zaqar/tests/unit/storage/test_impl_mongodb.py +++ /dev/null @@ -1,589 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
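The MongodbUtilsTest.test_calculate_backoff assertions below pin down the backoff contract: a linear ramp from zero toward max_sleep across max_attempts, plus up to max_jitter of random padding, and ValueError on out-of-range input. A toy implementation consistent with those assertions (the real helper lives in zaqar.storage.mongodb.utils and may differ in detail):

    import random

    def calculate_backoff(attempt, max_attempts, max_sleep, max_jitter=0):
        if max_sleep < 0 or max_jitter < 0:
            raise ValueError('max_sleep and max_jitter must be non-negative')
        if not 0 <= attempt < max_attempts:
            raise ValueError('attempt must be in [0, max_attempts)')
        base = float(attempt) * max_sleep / max_attempts
        return base + random.random() * max_jitter

    assert calculate_backoff(9, 10, 2, 0) == 1.8
    assert calculate_backoff(0, 10, 2, 0) == 0
    assert 0.8 <= calculate_backoff(4, 10, 2, 1) < 1.8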
- -import collections -import datetime -import time -import uuid - -import mock -from oslo_utils import timeutils -from pymongo import cursor -import pymongo.errors -import six -from testtools import matchers - -from zaqar.common import cache as oslo_cache -from zaqar.common import configs -from zaqar import storage -from zaqar.storage import errors -from zaqar.storage import mongodb -from zaqar.storage.mongodb import controllers -from zaqar.storage.mongodb import options -from zaqar.storage.mongodb import utils -from zaqar.storage import pooling -from zaqar import tests as testing -from zaqar.tests.unit.storage import base - - -class MongodbSetupMixin(object): - def _purge_databases(self): - if isinstance(self.driver, mongodb.DataDriver): - databases = (self.driver.message_databases + - [self.control.queues_database, - self.driver.subscriptions_database]) - else: - databases = [self.driver.queues_database] - - for db in databases: - self.driver.connection.drop_database(db) - - def _prepare_conf(self): - if options.MESSAGE_MONGODB_GROUP in self.conf: - self.config(options.MESSAGE_MONGODB_GROUP, - database=uuid.uuid4().hex) - - if options.MANAGEMENT_MONGODB_GROUP in self.conf: - self.config(options.MANAGEMENT_MONGODB_GROUP, - database=uuid.uuid4().hex) - - -class MongodbUtilsTest(MongodbSetupMixin, testing.TestBase): - - config_file = 'wsgi_mongodb.conf' - - def setUp(self): - super(MongodbUtilsTest, self).setUp() - - self.conf.register_opts(options.MESSAGE_MONGODB_OPTIONS, - group=options.MESSAGE_MONGODB_GROUP) - - self.mongodb_conf = self.conf[options.MESSAGE_MONGODB_GROUP] - - MockDriver = collections.namedtuple('MockDriver', 'mongodb_conf') - - self.driver = MockDriver(self.mongodb_conf) - self.control_driver = MockDriver(self.mongodb_conf) - - def test_scope_queue_name(self): - self.assertEqual('/my-q', utils.scope_queue_name('my-q')) - self.assertEqual('/my-q', utils.scope_queue_name('my-q', None)) - self.assertEqual('123/my-q', utils.scope_queue_name('my-q', '123')) - - self.assertEqual('/', utils.scope_queue_name(None)) - self.assertEqual('123/', utils.scope_queue_name(None, '123')) - - def test_descope_queue_name(self): - self.assertIsNone(utils.descope_queue_name('/')) - self.assertEqual('some-pig', utils.descope_queue_name('/some-pig')) - self.assertEqual('some-pig', - utils.descope_queue_name('radiant/some-pig')) - - def test_calculate_backoff(self): - sec = utils.calculate_backoff(0, 10, 2, 0) - self.assertEqual(0, sec) - - sec = utils.calculate_backoff(9, 10, 2, 0) - self.assertEqual(1.8, sec) - - sec = utils.calculate_backoff(4, 10, 2, 0) - self.assertEqual(0.8, sec) - - sec = utils.calculate_backoff(4, 10, 2, 1) - if sec != 0.8: - self.assertThat(sec, matchers.GreaterThan(0.8)) - self.assertThat(sec, matchers.LessThan(1.8)) - - self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, -2, -1) - self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, -2, 0) - self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, 2, -1) - - self.assertRaises(ValueError, utils.calculate_backoff, -2, 10, 2, 0) - self.assertRaises(ValueError, utils.calculate_backoff, -1, 10, 2, 0) - self.assertRaises(ValueError, utils.calculate_backoff, 10, 10, 2, 0) - self.assertRaises(ValueError, utils.calculate_backoff, 11, 10, 2, 0) - - def test_retries_on_autoreconnect(self): - num_calls = [0] - - @utils.retries_on_autoreconnect - def _raises_autoreconnect(self): - num_calls[0] += 1 - raise pymongo.errors.AutoReconnect() - - self.assertRaises(pymongo.errors.AutoReconnect, - 
_raises_autoreconnect, self) - self.assertEqual([self.mongodb_conf.max_reconnect_attempts], num_calls) - - def test_retries_on_autoreconnect_neg(self): - num_calls = [0] - - @utils.retries_on_autoreconnect - def _raises_autoreconnect(self): - num_calls[0] += 1 - - # NOTE(kgriffs): Don't exceed until the last attempt - if num_calls[0] < self.mongodb_conf.max_reconnect_attempts: - raise pymongo.errors.AutoReconnect() - - # NOTE(kgriffs): Test that this does *not* raise AutoReconnect - _raises_autoreconnect(self) - - self.assertEqual([self.mongodb_conf.max_reconnect_attempts], num_calls) - - -@testing.requires_mongodb -class MongodbDriverTest(MongodbSetupMixin, testing.TestBase): - - config_file = 'wsgi_mongodb.conf' - - def setUp(self): - super(MongodbDriverTest, self).setUp() - - self.conf.register_opts(configs._GENERAL_OPTIONS) - self.config(unreliable=False) - oslo_cache.register_config(self.conf) - - def test_db_instance(self): - self.config(unreliable=True) - cache = oslo_cache.get_cache(self.conf) - control = mongodb.ControlDriver(self.conf, cache) - data = mongodb.DataDriver(self.conf, cache, control) - - for db in data.message_databases: - self.assertThat(db.name, matchers.StartsWith( - data.mongodb_conf.database)) - - def test_version_match(self): - self.config(unreliable=True) - cache = oslo_cache.get_cache(self.conf) - - with mock.patch('pymongo.MongoClient.server_info') as info: - info.return_value = {'version': '2.1'} - self.assertRaises(RuntimeError, mongodb.DataDriver, - self.conf, cache, - mongodb.ControlDriver(self.conf, cache)) - - info.return_value = {'version': '2.11'} - - try: - mongodb.DataDriver(self.conf, cache, - mongodb.ControlDriver(self.conf, cache)) - except RuntimeError: - self.fail('version match failed') - - def test_replicaset_or_mongos_needed(self): - cache = oslo_cache.get_cache(self.conf) - - with mock.patch('pymongo.MongoClient.nodes') as nodes: - nodes.__get__ = mock.Mock(return_value=[]) - with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: - is_mongos.__get__ = mock.Mock(return_value=False) - self.assertRaises(RuntimeError, mongodb.DataDriver, - self.conf, cache, - mongodb.ControlDriver(self.conf, cache)) - - def test_using_replset(self): - cache = oslo_cache.get_cache(self.conf) - - with mock.patch('pymongo.MongoClient.nodes') as nodes: - nodes.__get__ = mock.Mock(return_value=['node1', 'node2']) - - with mock.patch('pymongo.MongoClient.write_concern') as wc: - write_concern = pymongo.WriteConcern(w=2) - wc.__get__ = mock.Mock(return_value=write_concern) - mongodb.DataDriver(self.conf, cache, - mongodb.ControlDriver(self.conf, cache)) - - def test_using_mongos(self): - cache = oslo_cache.get_cache(self.conf) - - with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: - is_mongos.__get__ = mock.Mock(return_value=True) - - with mock.patch('pymongo.MongoClient.write_concern') as wc: - write_concern = pymongo.WriteConcern(w=2) - wc.__get__ = mock.Mock(return_value=write_concern) - mongodb.DataDriver(self.conf, cache, - mongodb.ControlDriver(self.conf, cache)) - - def test_write_concern_check_works(self): - cache = oslo_cache.get_cache(self.conf) - - with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: - is_mongos.__get__ = mock.Mock(return_value=True) - - with mock.patch('pymongo.MongoClient.write_concern') as wc: - write_concern = pymongo.WriteConcern(w=1) - wc.__get__ = mock.Mock(return_value=write_concern) - self.assertRaises(RuntimeError, mongodb.DataDriver, - self.conf, cache, - mongodb.ControlDriver(self.conf, cache)) - - 
write_concern = pymongo.WriteConcern(w=2) - wc.__get__ = mock.Mock(return_value=write_concern) - mongodb.DataDriver(self.conf, cache, - mongodb.ControlDriver(self.conf, cache)) - - def test_write_concern_is_set(self): - cache = oslo_cache.get_cache(self.conf) - - with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: - is_mongos.__get__ = mock.Mock(return_value=True) - self.config(unreliable=True) - driver = mongodb.DataDriver(self.conf, cache, - mongodb.ControlDriver - (self.conf, cache)) - - driver.server_version = (2, 6) - - for db in driver.message_databases: - wc = db.write_concern - - self.assertEqual('majority', wc.document['w']) - self.assertFalse(wc.document['j']) - - -@testing.requires_mongodb -class MongodbQueueTests(MongodbSetupMixin, base.QueueControllerTest): - - driver_class = mongodb.ControlDriver - config_file = 'wsgi_mongodb.conf' - controller_class = controllers.QueueController - control_driver_class = mongodb.ControlDriver - - def test_indexes(self): - collection = self.controller._collection - indexes = collection.index_information() - self.assertIn('p_q_1', indexes) - - def test_raises_connection_error(self): - - with mock.patch.object(cursor.Cursor, - 'next' if six.PY2 else '__next__', - spec=True) as method: - error = pymongo.errors.ConnectionFailure() - method.side_effect = error - - queues = next(self.controller.list()) - self.assertRaises(storage.errors.ConnectionError, - queues.next) - - -@testing.requires_mongodb -class MongodbMessageTests(MongodbSetupMixin, base.MessageControllerTest): - - driver_class = mongodb.DataDriver - config_file = 'wsgi_mongodb.conf' - controller_class = controllers.MessageController - control_driver_class = mongodb.ControlDriver - - # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute - gc_interval = 60 - - def test_indexes(self): - for collection in self.controller._collections: - indexes = collection.index_information() - self.assertIn('active', indexes) - self.assertIn('claimed', indexes) - self.assertIn('queue_marker', indexes) - self.assertIn('counting', indexes) - - def test_message_counter(self): - queue_name = self.queue_name - iterations = 10 - - m = mock.MagicMock(controllers.QueueController) - self.controller._queue_ctrl = m - del self.controller._queue_ctrl._get_counter - del self.controller._queue_ctrl._inc_counter - - seed_marker1 = self.controller._get_counter(queue_name, - self.project) - self.assertEqual(0, seed_marker1, 'First marker is 0') - - for i in range(iterations): - self.controller.post(queue_name, [{'ttl': 60}], - 'uuid', project=self.project) - - marker1 = self.controller._get_counter(queue_name, - self.project) - marker2 = self.controller._get_counter(queue_name, - self.project) - marker3 = self.controller._get_counter(queue_name, - self.project) - - self.assertEqual(marker1, marker2) - self.assertEqual(marker2, marker3) - self.assertEqual(i + 1, marker1) - - new_value = self.controller._inc_counter(queue_name, - self.project) - self.assertIsNotNone(new_value) - - value_before = self.controller._get_counter(queue_name, - project=self.project) - new_value = self.controller._inc_counter(queue_name, - project=self.project) - self.assertIsNotNone(new_value) - value_after = self.controller._get_counter(queue_name, - project=self.project) - self.assertEqual(value_before + 1, value_after) - - value_before = value_after - new_value = self.controller._inc_counter(queue_name, - project=self.project, - amount=7) - value_after = self.controller._get_counter(queue_name, - project=self.project) - 
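    # A toy model of the windowed-counter semantics asserted around this
    # point: an increment requested within `window` seconds of the last
    # update is a no-op and returns None, which is how racing posts detect
    # that another writer already bumped the marker. The real controller
    # performs this atomically in MongoDB; names here are illustrative.
    import time

    counters = {}  # name -> (value, last_updated_timestamp)

    def inc_counter(name, amount=1, window=None):
        value, updated = counters.get(name, (0, 0.0))
        if window is not None and time.time() - updated < window:
            return None  # another writer updated the counter recently
        counters[name] = (value + amount, time.time())
        return value + amount

    assert inc_counter('q') == 1
    assert inc_counter('q', amount=7) == 8
    assert inc_counter('q', window=3600) is None  # inside window: no-op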
self.assertEqual(value_before + 7, value_after) - self.assertEqual(new_value, value_after) - - reference_value = value_after - - unchanged = self.controller._inc_counter(queue_name, - project=self.project, - window=10) - self.assertIsNone(unchanged) - - timeutils.set_time_override() - timeutils.advance_time_delta(datetime.timedelta(seconds=10)) - - changed = self.controller._inc_counter(queue_name, - project=self.project, - window=5) - self.assertEqual(reference_value + 1, changed) - - timeutils.clear_time_override() - - -@testing.requires_mongodb -class MongodbFIFOMessageTests(MongodbSetupMixin, base.MessageControllerTest): - - driver_class = mongodb.FIFODataDriver - config_file = 'wsgi_fifo_mongodb.conf' - controller_class = controllers.FIFOMessageController - control_driver_class = mongodb.ControlDriver - - # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute - gc_interval = 60 - - def test_race_condition_on_post(self): - queue_name = self.queue_name - - expected_messages = [ - { - 'ttl': 60, - 'body': { - 'event': 'BackupStarted', - 'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce', - }, - }, - { - 'ttl': 60, - 'body': { - 'event': 'BackupStarted', - 'backupId': 'd378813c-3f0b-11e2-ad92-7823d2b0f3ce', - }, - }, - { - 'ttl': 60, - 'body': { - 'event': 'BackupStarted', - 'backupId': 'e378813c-3f0b-11e2-ad92-7823d2b0f3ce', - }, - }, - ] - - uuid = '97b64000-2526-11e3-b088-d85c1300734c' - - # NOTE(kgriffs): Patch _inc_counter so it is a noop, so that - # the second time we post, we will get a collision. This simulates - # what happens when we have parallel requests and the "winning" - # request hasn't gotten around to calling _inc_counter before the - # "losing" request attempts to insert its batch of messages. - with mock.patch.object(mongodb.messages.MessageController, - '_inc_counter', autospec=True) as ic: - - ic.return_value = 2 - messages = expected_messages[:1] - created = list(self.controller.post(queue_name, - messages, uuid, - project=self.project)) - self.assertEqual(1, len(created)) - - # Force infinite retries - ic.return_value = None - - with testing.expect(errors.MessageConflict): - self.controller.post(queue_name, messages, - uuid, project=self.project) - - created = list(self.controller.post(queue_name, - expected_messages[1:], - uuid, project=self.project)) - - self.assertEqual(2, len(created)) - - expected_ids = [m['body']['backupId'] for m in expected_messages] - - interaction = self.controller.list(queue_name, client_uuid=uuid, - echo=True, project=self.project) - - actual_messages = list(next(interaction)) - self.assertEqual(len(expected_messages), len(actual_messages)) - actual_ids = [m['body']['backupId'] for m in actual_messages] - - self.assertEqual(expected_ids, actual_ids) - - -@testing.requires_mongodb -class MongodbClaimTests(MongodbSetupMixin, base.ClaimControllerTest): - - driver_class = mongodb.DataDriver - config_file = 'wsgi_mongodb.conf' - controller_class = controllers.ClaimController - control_driver_class = mongodb.ControlDriver - - def test_claim_doesnt_exist(self): - """Verifies that operations fail on expired/missing claims. - - Methods should raise an exception when the claim doesn't - exist and/or has expired. 
- """ - epoch = '000000000000000000000000' - self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.get, self.queue_name, - epoch, project=self.project) - - claim_id, messages = self.controller.create(self.queue_name, - {'ttl': 1, 'grace': 0}, - project=self.project) - - # Let's let it expire - time.sleep(1) - self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.update, self.queue_name, - claim_id, {'ttl': 1, 'grace': 0}, - project=self.project) - - self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.update, self.queue_name, - claim_id, {'ttl': 1, 'grace': 0}, - project=self.project) - - -@testing.requires_mongodb -class MongodbSubscriptionTests(MongodbSetupMixin, - base.SubscriptionControllerTest): - driver_class = mongodb.DataDriver - config_file = 'wsgi_mongodb.conf' - controller_class = controllers.SubscriptionController - control_driver_class = mongodb.ControlDriver - - -# -# TODO(kgriffs): Do these need database purges as well as those above? -# - -@testing.requires_mongodb -class MongodbPoolsTests(base.PoolsControllerTest): - config_file = 'wsgi_mongodb.conf' - driver_class = mongodb.ControlDriver - controller_class = controllers.PoolsController - control_driver_class = mongodb.ControlDriver - - def setUp(self): - super(MongodbPoolsTests, self).setUp() - - def tearDown(self): - super(MongodbPoolsTests, self).tearDown() - - def test_delete_pool_used_by_flavor(self): - self.flavors_controller.create('durable', self.pool_group, - project=self.project, - capabilities={}) - - with testing.expect(errors.PoolInUseByFlavor): - self.pools_controller.delete(self.pool) - - def test_mismatching_capabilities_fifo(self): - with testing.expect(errors.PoolCapabilitiesMismatch): - self.pools_controller.create(str(uuid.uuid1()), - 100, 'mongodb.fifo://localhost', - group=self.pool_group, - options={}) - - def test_duplicate_uri(self): - with testing.expect(errors.PoolAlreadyExists): - # The url 'localhost' is used in setUp(). So reusing the uri - # 'localhost' here will raise PoolAlreadyExists. - self.pools_controller.create(str(uuid.uuid1()), 100, 'localhost', - group=str(uuid.uuid1()), options={}) - - -@testing.requires_mongodb -class MongodbCatalogueTests(base.CatalogueControllerTest): - driver_class = mongodb.ControlDriver - controller_class = controllers.CatalogueController - control_driver_class = mongodb.ControlDriver - config_file = 'wsgi_mongodb.conf' - - def setUp(self): - super(MongodbCatalogueTests, self).setUp() - self.addCleanup(self.controller.drop_all) - - -@testing.requires_mongodb -class PooledMessageTests(base.MessageControllerTest): - config_file = 'wsgi_mongodb_pooled.conf' - controller_class = pooling.MessageController - driver_class = pooling.DataDriver - control_driver_class = mongodb.ControlDriver - controller_base_class = storage.Message - - # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute - gc_interval = 60 - - -@testing.requires_mongodb -class PooledClaimsTests(base.ClaimControllerTest): - config_file = 'wsgi_mongodb_pooled.conf' - controller_class = pooling.ClaimController - driver_class = pooling.DataDriver - control_driver_class = mongodb.ControlDriver - controller_base_class = storage.Claim - - def test_delete_message_expired_claim(self): - # NOTE(flaper87): The pool tests use sqlalchemy - # as one of the pools, which causes this test to fail. 
- # Several reasons to do this: - # The sqla driver is deprecated - # It's not optimized - # mocking utcnow mocks the driver too, which - # requires putting sleeps in the test - self.skip("Fix sqlalchemy driver") - - -@testing.requires_mongodb -class MongodbFlavorsTest(base.FlavorsControllerTest): - driver_class = mongodb.ControlDriver - controller_class = controllers.FlavorsController - control_driver_class = mongodb.ControlDriver - config_file = 'wsgi_mongodb.conf' - - def setUp(self): - super(MongodbFlavorsTest, self).setUp() - self.addCleanup(self.controller.drop_all) diff --git a/zaqar/tests/unit/storage/test_impl_redis.py b/zaqar/tests/unit/storage/test_impl_redis.py deleted file mode 100644 index 87b689e8..00000000 --- a/zaqar/tests/unit/storage/test_impl_redis.py +++ /dev/null @@ -1,475 +0,0 @@ -# Copyright (c) 2014 Prashanth Raghu. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import time -import uuid - -import mock -from oslo_utils import timeutils -from oslo_utils import uuidutils -import redis - -from zaqar.common import cache as oslo_cache -from zaqar.common import errors -from zaqar import storage -from zaqar.storage import mongodb -from zaqar.storage.redis import controllers -from zaqar.storage.redis import driver -from zaqar.storage.redis import messages -from zaqar.storage.redis import options -from zaqar.storage.redis import utils -from zaqar import tests as testing -from zaqar.tests.unit.storage import base - - -def _create_sample_message(now=None, claimed=False, body=None): - if now is None: - now = timeutils.utcnow_ts() - - if claimed: - claim_id = uuid.uuid4() - claim_expires = now + 300 - else: - claim_id = None - claim_expires = now - - if body is None: - body = {} - - return messages.Message( - ttl=60, - created=now, - client_uuid=uuid.uuid4(), - claim_id=claim_id, - claim_expires=claim_expires, - body=body - ) - - -class RedisUtilsTest(testing.TestBase): - - config_file = 'wsgi_redis.conf' - - def setUp(self): - super(RedisUtilsTest, self).setUp() - - self.conf.register_opts(options.MESSAGE_REDIS_OPTIONS, - group=options.MESSAGE_REDIS_GROUP) - - self.redis_conf = self.conf[options.MESSAGE_REDIS_GROUP] - - MockDriver = collections.namedtuple('MockDriver', 'redis_conf') - - self.driver = MockDriver(self.redis_conf) - - def test_scope_queue_name(self): - self.assertEqual('.my-q', utils.scope_queue_name('my-q')) - self.assertEqual('.my-q', utils.scope_queue_name('my-q', None)) - self.assertEqual('123.my-q', utils.scope_queue_name('my-q', '123')) - self.assertEqual('123.my-q_1', utils.scope_queue_name('my-q_1', '123')) - - self.assertEqual('.', utils.scope_queue_name()) - self.assertEqual('123.', utils.scope_queue_name(None, '123')) - - def test_scope_messages_set(self): - self.assertEqual('.my-q.', utils.scope_message_ids_set('my-q')) - self.assertEqual('p.my-q.', utils.scope_message_ids_set('my-q', 'p')) - self.assertEqual('p.my-q.s', - utils.scope_message_ids_set('my-q', 'p', 's')) - - self.assertEqual('..', utils.scope_message_ids_set(None)) - 
self.assertEqual('123..', utils.scope_message_ids_set(None, '123')) - self.assertEqual('..s', utils.scope_message_ids_set(None, None, 's')) - - def test_descope_messages_set(self): - key = utils.scope_message_ids_set('my-q') - self.assertEqual(('my-q', None), utils.descope_message_ids_set(key)) - - key = utils.scope_message_ids_set('my-q', '123') - self.assertEqual(('my-q', '123'), utils.descope_message_ids_set(key)) - - key = utils.scope_message_ids_set(None, '123') - self.assertEqual((None, '123'), utils.descope_message_ids_set(key)) - - key = utils.scope_message_ids_set() - self.assertEqual((None, None), utils.descope_message_ids_set(key)) - - def test_normalize_none_str(self): - - self.assertEqual('my-q', utils.normalize_none_str('my-q')) - self.assertEqual('', utils.normalize_none_str(None)) - - def test_msg_claimed_filter(self): - now = timeutils.utcnow_ts() - - unclaimed_msg = _create_sample_message() - self.assertFalse(utils.msg_claimed_filter(unclaimed_msg, now)) - - claimed_msg = _create_sample_message(claimed=True) - self.assertTrue(utils.msg_claimed_filter(claimed_msg, now)) - - # NOTE(kgriffs): Has a claim ID, but the claim is expired - claimed_msg.claim_expires = now - 60 - self.assertFalse(utils.msg_claimed_filter(claimed_msg, now)) - - def test_descope_queue_name(self): - self.assertEqual('q', utils.descope_queue_name('p.q')) - self.assertEqual('q', utils.descope_queue_name('.q')) - self.assertEqual('', utils.descope_queue_name('.')) - - def test_msg_echo_filter(self): - msg = _create_sample_message() - self.assertTrue(utils.msg_echo_filter(msg, msg.client_uuid)) - - alt_uuid = uuidutils.generate_uuid() - self.assertFalse(utils.msg_echo_filter(msg, alt_uuid)) - - def test_basic_message(self): - now = timeutils.utcnow_ts() - body = { - 'msg': 'Hello Earthlings!', - 'unicode': u'ab\u00e7', - 'bytes': b'ab\xc3\xa7', - b'ab\xc3\xa7': 'one, two, three', - u'ab\u00e7': 'one, two, three', - } - - msg = _create_sample_message(now=now, body=body) - basic_msg = msg.to_basic(now + 5) - - self.assertEqual(msg.id, basic_msg['id']) - self.assertEqual(5, basic_msg['age']) - self.assertEqual(body, basic_msg['body']) - self.assertEqual(msg.ttl, basic_msg['ttl']) - - def test_retries_on_connection_error(self): - num_calls = [0] - - @utils.retries_on_connection_error - def _raises_connection_error(self): - num_calls[0] += 1 - raise redis.exceptions.ConnectionError - - self.assertRaises(redis.exceptions.ConnectionError, - _raises_connection_error, self) - self.assertEqual([self.redis_conf.max_reconnect_attempts], num_calls) - - -@testing.requires_redis -class RedisDriverTest(testing.TestBase): - - config_file = 'wsgi_redis.conf' - - def test_db_instance(self): - oslo_cache.register_config(self.conf) - cache = oslo_cache.get_cache(self.conf) - redis_driver = driver.DataDriver(self.conf, cache, - driver.ControlDriver - (self.conf, cache)) - - self.assertIsInstance(redis_driver.connection, redis.StrictRedis) - - def test_version_match(self): - oslo_cache.register_config(self.conf) - cache = oslo_cache.get_cache(self.conf) - - with mock.patch('redis.StrictRedis.info') as info: - info.return_value = {'redis_version': '2.4.6'} - self.assertRaises(RuntimeError, driver.DataDriver, - self.conf, cache, - driver.ControlDriver(self.conf, cache)) - - info.return_value = {'redis_version': '2.11'} - - try: - driver.DataDriver(self.conf, cache, - driver.ControlDriver(self.conf, cache)) - except RuntimeError: - self.fail('version match failed') - - def test_connection_url_invalid(self): - 
self.assertRaises(errors.ConfigurationError, - driver.ConnectionURI, - 'red://example.com') - - self.assertRaises(errors.ConfigurationError, - driver.ConnectionURI, - 'redis://') - - self.assertRaises(errors.ConfigurationError, - driver.ConnectionURI, - 'redis://example.com:not_an_integer') - - self.assertRaises(errors.ConfigurationError, - driver.ConnectionURI, - 'redis://s1:not_an_integer,s2?master=obi-wan') - - self.assertRaises(errors.ConfigurationError, - driver.ConnectionURI, - 'redis://s1,s2') - - self.assertRaises(errors.ConfigurationError, - driver.ConnectionURI, - 'redis:') - - self.assertRaises(errors.ConfigurationError, - driver.ConnectionURI, - 'redis:') - - def test_connection_url_tcp(self): - uri = driver.ConnectionURI('redis://example.com') - self.assertEqual(driver.STRATEGY_TCP, uri.strategy) - self.assertEqual(6379, uri.port) - self.assertEqual(0.1, uri.socket_timeout) - - uri = driver.ConnectionURI('redis://example.com:7777') - self.assertEqual(driver.STRATEGY_TCP, uri.strategy) - self.assertEqual(7777, uri.port) - - uri = driver.ConnectionURI( - 'redis://example.com:7777?socket_timeout=1') - self.assertEqual(driver.STRATEGY_TCP, uri.strategy) - self.assertEqual(7777, uri.port) - self.assertEqual(1.0, uri.socket_timeout) - - def test_connection_uri_unix_socket(self): - uri = driver.ConnectionURI('redis:/tmp/redis.sock') - self.assertEqual(driver.STRATEGY_UNIX, uri.strategy) - self.assertEqual('/tmp/redis.sock', uri.unix_socket_path) - self.assertEqual(0.1, uri.socket_timeout) - - uri = driver.ConnectionURI('redis:/tmp/redis.sock?socket_timeout=1.5') - self.assertEqual(driver.STRATEGY_UNIX, uri.strategy) - self.assertEqual('/tmp/redis.sock', uri.unix_socket_path) - self.assertEqual(1.5, uri.socket_timeout) - - def test_connection_uri_sentinel(self): - uri = driver.ConnectionURI('redis://s1?master=dumbledore') - self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) - self.assertEqual([('s1', 26379)], uri.sentinels) - self.assertEqual('dumbledore', uri.master) - self.assertEqual(0.1, uri.socket_timeout) - - uri = driver.ConnectionURI('redis://s1,s2?master=dumbledore') - self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) - self.assertEqual([('s1', 26379), ('s2', 26379)], uri.sentinels) - self.assertEqual('dumbledore', uri.master) - self.assertEqual(0.1, uri.socket_timeout) - - uri = driver.ConnectionURI('redis://s1:26389,s1?master=dumbledore') - self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) - self.assertEqual([('s1', 26389), ('s1', 26379)], uri.sentinels) - self.assertEqual('dumbledore', uri.master) - self.assertEqual(0.1, uri.socket_timeout) - - uri = driver.ConnectionURI( - 'redis://s1?master=dumbledore&socket_timeout=0.5') - self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) - self.assertEqual([('s1', 26379)], uri.sentinels) - self.assertEqual('dumbledore', uri.master) - self.assertEqual(0.5, uri.socket_timeout) - - -@testing.requires_redis -class RedisQueuesTest(base.QueueControllerTest): - - driver_class = driver.DataDriver - config_file = 'wsgi_redis.conf' - controller_class = controllers.QueueController - control_driver_class = mongodb.ControlDriver - - def setUp(self): - super(RedisQueuesTest, self).setUp() - self.connection = self.driver.connection - self.msg_controller = self.driver.message_controller - - def tearDown(self): - super(RedisQueuesTest, self).tearDown() - self.connection.flushdb() - - -@testing.requires_redis -class RedisMessagesTest(base.MessageControllerTest): - driver_class = driver.DataDriver - config_file = 
'wsgi_redis.conf' - controller_class = controllers.MessageController - control_driver_class = mongodb.ControlDriver - gc_interval = 1 - - def setUp(self): - super(RedisMessagesTest, self).setUp() - self.connection = self.driver.connection - - def tearDown(self): - super(RedisMessagesTest, self).tearDown() - self.connection.flushdb() - - def test_count(self): - queue_name = 'get-count' - self.queue_controller.create(queue_name) - - msgs = [{ - 'ttl': 300, - 'body': 'di mo fy' - } for i in range(0, 10)] - - client_id = uuid.uuid4() - # Creating 10 messages - self.controller.post(queue_name, msgs, client_id) - - num_msg = self.controller._count(queue_name, None) - self.assertEqual(10, num_msg) - - def test_empty_queue_exception(self): - queue_name = 'empty-queue-test' - self.queue_controller.create(queue_name) - - self.assertRaises(storage.errors.QueueIsEmpty, - self.controller.first, queue_name) - - def test_gc(self): - self.queue_controller.create(self.queue_name) - self.controller.post(self.queue_name, - [{'ttl': 0, 'body': {}}], - client_uuid=uuidutils.generate_uuid()) - - num_removed = self.controller.gc() - self.assertEqual(1, num_removed) - - for _ in range(100): - self.controller.post(self.queue_name, - [{'ttl': 0, 'body': {}}], - client_uuid=uuidutils.generate_uuid()) - - num_removed = self.controller.gc() - self.assertEqual(100, num_removed) - - def test_invalid_uuid(self): - queue_name = 'invalid-uuid-test' - msgs = [{ - 'ttl': 300, - 'body': 'di mo fy' - }] - client_id = "invalid_uuid" - self.assertRaises(ValueError, self.controller.post, queue_name, msgs, - client_id) - - -@testing.requires_redis -class RedisClaimsTest(base.ClaimControllerTest): - driver_class = driver.DataDriver - config_file = 'wsgi_redis.conf' - controller_class = controllers.ClaimController - control_driver_class = mongodb.ControlDriver - - def setUp(self): - super(RedisClaimsTest, self).setUp() - self.connection = self.driver.connection - - def tearDown(self): - super(RedisClaimsTest, self).tearDown() - self.connection.flushdb() - - def test_claim_doesnt_exist(self): - queue_name = 'no-such-claim' - epoch = '000000000000000000000000' - self.queue_controller.create(queue_name) - self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.get, queue_name, - epoch, project=None) - - claim_id, messages = self.controller.create(queue_name, {'ttl': 1, - 'grace': 0}, - project=None) - - # Let's let it expire - time.sleep(1) - self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.update, queue_name, - claim_id, {}, project=None) - - # create a claim and then delete the queue - claim_id, messages = self.controller.create(queue_name, {'ttl': 100, - 'grace': 0}, - project=None) - self.queue_controller.delete(queue_name) - - self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.get, queue_name, - claim_id, project=None) - - self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.update, queue_name, - claim_id, {}, project=None) - - def test_get_claim_after_expires(self): - queue_name = 'no-such-claim' - self.queue_controller.create(queue_name, project='fake_project') - new_messages = [{'ttl': 60, 'body': {}}, - {'ttl': 60, 'body': {}}, - {'ttl': 60, 'body': {}}] - self.message_controller.post(queue_name, new_messages, - client_uuid=str(uuid.uuid4()), - project='fake_project') - claim_id, messages = self.controller.create(queue_name, {'ttl': 1, - 'grace': 0}, - project='fake_project') - # Let's let it expire - time.sleep(2) - 
self.assertRaises(storage.errors.ClaimDoesNotExist, - self.controller.get, queue_name, - claim_id, project='fake_project') - - def test_gc(self): - self.queue_controller.create(self.queue_name) - - for _ in range(100): - self.message_controller.post(self.queue_name, - [{'ttl': 300, 'body': 'yo gabba'}], - client_uuid=uuidutils.generate_uuid()) - - now = timeutils.utcnow_ts() - timeutils_utcnow = 'oslo_utils.timeutils.utcnow_ts' - - # Test a single claim - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - 1 - self.controller.create(self.queue_name, {'ttl': 1, 'grace': 60}) - - num_removed = self.controller._gc(self.queue_name, None) - self.assertEqual(1, num_removed) - - # Test multiple claims - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - 1 - - for _ in range(5): - self.controller.create(self.queue_name, - {'ttl': 1, 'grace': 60}) - - # NOTE(kgriffs): These ones should not be cleaned up - self.controller.create(self.queue_name, {'ttl': 60, 'grace': 60}) - self.controller.create(self.queue_name, {'ttl': 60, 'grace': 60}) - - num_removed = self.controller._gc(self.queue_name, None) - self.assertEqual(5, num_removed) - - -@testing.requires_redis -class RedisSubscriptionTests(base.SubscriptionControllerTest): - driver_class = driver.DataDriver - config_file = 'wsgi_redis.conf' - controller_class = controllers.SubscriptionController - control_driver_class = driver.ControlDriver diff --git a/zaqar/tests/unit/storage/test_impl_sqlalchemy.py b/zaqar/tests/unit/storage/test_impl_sqlalchemy.py deleted file mode 100644 index 226a00d7..00000000 --- a/zaqar/tests/unit/storage/test_impl_sqlalchemy.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
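A note on the garbage-collection test above: test_gc never sleeps, it backdates. Patching oslo_utils.timeutils.utcnow_ts to return a timestamp one second in the past means every claim created inside the patch context is already at or past expiry once real time resumes. A minimal, self-contained sketch of that idiom, using a stand-in Claim class rather than the actual Redis controller:

    import mock
    from oslo_utils import timeutils


    class Claim(object):
        # Stand-in for a stored claim; the real controller keeps the
        # created/expires timestamps in Redis.
        def __init__(self, ttl):
            self.created = timeutils.utcnow_ts()
            self.expires = self.created + ttl


    now = timeutils.utcnow_ts()

    with mock.patch('oslo_utils.timeutils.utcnow_ts') as mock_utcnow:
        mock_utcnow.return_value = now - 1
        claim = Claim(ttl=1)  # created "one second ago"

    # Back in real time: the claim is already expired, so a GC pass
    # would collect it immediately, with no sleep() in the test.
    assert claim.expires <= timeutils.utcnow_ts()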
- -import six - -from zaqar.storage import sqlalchemy -from zaqar.storage.sqlalchemy import controllers -from zaqar.storage.sqlalchemy import tables -from zaqar.storage.sqlalchemy import utils -from zaqar import tests as testing -from zaqar.tests.unit.storage import base - - -class DBCreateMixin(object): - - def _prepare_conf(self): - tables.metadata.create_all(self.driver.engine) - - -class SqlalchemyQueueTests(DBCreateMixin, base.QueueControllerTest): - driver_class = sqlalchemy.ControlDriver - config_file = 'wsgi_sqlalchemy.conf' - controller_class = controllers.QueueController - control_driver_class = sqlalchemy.ControlDriver - - -class SqlalchemyPoolsTest(DBCreateMixin, base.PoolsControllerTest): - config_file = 'wsgi_sqlalchemy.conf' - driver_class = sqlalchemy.ControlDriver - controller_class = controllers.PoolsController - control_driver_class = sqlalchemy.ControlDriver - - -class SqlalchemyCatalogueTest(DBCreateMixin, base.CatalogueControllerTest): - config_file = 'wsgi_sqlalchemy.conf' - driver_class = sqlalchemy.ControlDriver - controller_class = controllers.CatalogueController - control_driver_class = sqlalchemy.ControlDriver - - -class SqlalchemyFlavorsTest(DBCreateMixin, base.FlavorsControllerTest): - config_file = 'wsgi_sqlalchemy.conf' - driver_class = sqlalchemy.ControlDriver - controller_class = controllers.FlavorsController - control_driver_class = sqlalchemy.ControlDriver - - -class MsgidTests(testing.TestBase): - - def test_encode(self): - if six.PY2: - ids = [3, long(1), 0] - elif six.PY3: - ids = [3, 1, 0] - msgids = ['5c693a50', '5c693a52', '5c693a53'] - for msgid, id in zip(msgids, ids): - self.assertEqual(msgid, utils.msgid_encode(id)) - - def test_decode(self): - msgids = ['5c693a50', '5c693a52', '5c693a53', ''] - ids = [3, 1, 0, None] - for msgid, id in zip(msgids, ids): - self.assertEqual(id, utils.msgid_decode(msgid)) diff --git a/zaqar/tests/unit/storage/test_impl_swift.py b/zaqar/tests/unit/storage/test_impl_swift.py deleted file mode 100644 index 5be91e54..00000000 --- a/zaqar/tests/unit/storage/test_impl_swift.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
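The MsgidTests vectors above (0 maps to '5c693a50' family: 0 to '5c693a53', 1 to '5c693a52', 3 to '5c693a50') are consistent with XOR-masking the integer row id against the constant 0x5c693a53 and formatting the result as hex. The sketch below is reconstructed from those vectors alone; the constant and the error handling are inferences, not necessarily the actual zaqar.storage.sqlalchemy.utils code:

    MASK = 0x5c693a53  # inferred from the test vectors


    def msgid_encode(msg_id):
        return '%08x' % (msg_id ^ MASK)


    def msgid_decode(msgid):
        try:
            return int(msgid, 16) ^ MASK
        except ValueError:
            return None  # test_decode expects '' to map to None


    assert msgid_encode(3) == '5c693a50'
    assert msgid_decode('5c693a52') == 1
    assert msgid_decode('') is None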
-from zaqar.common import cache as oslo_cache -from zaqar.storage import mongodb -from zaqar.storage.swift import controllers -from zaqar.storage.swift import driver -from zaqar import tests as testing -from zaqar.tests.unit.storage import base - - -@testing.requires_swift -class SwiftMessagesTest(base.MessageControllerTest): - driver_class = driver.DataDriver - config_file = 'wsgi_swift.conf' - controller_class = controllers.MessageController - control_driver_class = mongodb.ControlDriver - gc_interval = 1 - - -@testing.requires_swift -class SwiftClaimsTest(base.ClaimControllerTest): - driver_class = driver.DataDriver - config_file = 'wsgi_swift.conf' - controller_class = controllers.ClaimController - control_driver_class = mongodb.ControlDriver - - -@testing.requires_swift -class SwiftSubscriptionsTest(base.SubscriptionControllerTest): - driver_class = driver.DataDriver - config_file = 'wsgi_swift.conf' - controller_class = controllers.SubscriptionController - control_driver_class = mongodb.ControlDriver - - -@testing.requires_swift -class SwiftDriverTest(testing.TestBase): - config_file = 'wsgi_swift.conf' - - def test_is_alive(self): - oslo_cache.register_config(self.conf) - cache = oslo_cache.get_cache(self.conf) - swift_driver = driver.DataDriver(self.conf, cache, - mongodb.ControlDriver - (self.conf, cache)) - - self.assertTrue(swift_driver.is_alive()) diff --git a/zaqar/tests/unit/storage/test_pool_catalog.py b/zaqar/tests/unit/storage/test_pool_catalog.py deleted file mode 100644 index 876e02a7..00000000 --- a/zaqar/tests/unit/storage/test_pool_catalog.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import mock -import uuid - -from zaqar.common import cache as oslo_cache -from zaqar.storage import errors -from zaqar.storage import mongodb -from zaqar.storage import pooling -from zaqar.storage import utils -from zaqar import tests as testing - - -# TODO(cpp-cabrera): it would be wonderful to refactor this unit test -# so that it could use multiple control storage backends once those -# have pools/catalogue implementations. 
-@testing.requires_mongodb -class PoolCatalogTest(testing.TestBase): - - config_file = 'wsgi_mongodb_pooled_disable_virtual_pool.conf' - - def setUp(self): - super(PoolCatalogTest, self).setUp() - - oslo_cache.register_config(self.conf) - cache = oslo_cache.get_cache(self.conf) - control = utils.load_storage_driver(self.conf, cache, - control_mode=True) - - self.pools_ctrl = control.pools_controller - self.flavors_ctrl = control.flavors_controller - self.catalogue_ctrl = control.catalogue_controller - - # NOTE(cpp-cabrera): populate catalogue - self.pool = str(uuid.uuid1()) - self.pool2 = str(uuid.uuid1()) - self.pool_group = 'pool-group' - self.queue = str(uuid.uuid1()) - self.flavor = str(uuid.uuid1()) - self.project = str(uuid.uuid1()) - - # FIXME(therve) This is horrible, we need to manage duplication in a - # nicer way - if 'localhost' in self.mongodb_url: - other_url = self.mongodb_url.replace('localhost', '127.0.0.1') - elif '127.0.0.1' in self.mongodb_url: - other_url = self.mongodb_url.replace('127.0.0.1', 'localhost') - else: - self.skipTest("Can't build a dummy mongo URL.") - - self.pools_ctrl.create(self.pool, 100, self.mongodb_url) - self.pools_ctrl.create(self.pool2, 100, - other_url, - group=self.pool_group) - self.catalogue_ctrl.insert(self.project, self.queue, self.pool) - self.catalog = pooling.Catalog(self.conf, cache, control) - self.flavors_ctrl.create(self.flavor, self.pool_group, - project=self.project) - - def tearDown(self): - self.catalogue_ctrl.drop_all() - self.pools_ctrl.drop_all() - super(PoolCatalogTest, self).tearDown() - - def test_lookup_loads_correct_driver(self): - storage = self.catalog.lookup(self.queue, self.project) - self.assertIsInstance(storage._storage, mongodb.DataDriver) - - def test_lookup_returns_default_or_none_if_queue_not_mapped(self): - # Return default - self.assertIsNone(self.catalog.lookup('not', 'mapped')) - - self.config(message_store='faulty', group='drivers') - self.config(enable_virtual_pool=True, group='pooling:catalog') - self.assertIsNotNone(self.catalog.lookup('not', 'mapped')) - - def test_lookup_returns_none_if_entry_deregistered(self): - self.catalog.deregister(self.queue, self.project) - self.assertIsNone(self.catalog.lookup(self.queue, self.project)) - - def test_register_leads_to_successful_lookup(self): - self.catalog.register('not_yet', 'mapped') - storage = self.catalog.lookup('not_yet', 'mapped') - self.assertIsInstance(storage._storage, mongodb.DataDriver) - - def test_register_with_flavor(self): - queue = 'test' - self.catalog.register(queue, project=self.project, - flavor=self.flavor) - storage = self.catalog.lookup(queue, self.project) - self.assertIsInstance(storage._storage, mongodb.DataDriver) - - def test_register_with_fake_flavor(self): - self.assertRaises(errors.FlavorDoesNotExist, - self.catalog.register, - 'test', project=self.project, - flavor='fake') - - def test_queues_list_on_multi_pools(self): - def fake_list(project=None, marker=None, limit=10, detailed=False): - yield iter([{'name': 'fake_queue'}]) - - list_str = 'zaqar.storage.mongodb.queues.QueueController.list' - with mock.patch(list_str) as queues_list: - queues_list.side_effect = fake_list - queue_controller = pooling.QueueController(self.catalog) - result = queue_controller.list(project=self.project) - queue_list = list(next(result)) - self.assertEqual(1, len(queue_list)) - - def test_queue_create_with_empty_json_body(self): - queue_controller = pooling.QueueController(self.catalog) - with mock.patch('zaqar.storage.pooling.Catalog.register') 
as register: - queue_controller.create(self.queue, metadata={}, - project=self.project) - register.assert_called_with(self.queue, project=self.project, - flavor=None) diff --git a/zaqar/tests/unit/storage/test_utils.py b/zaqar/tests/unit/storage/test_utils.py deleted file mode 100644 index a25029f3..00000000 --- a/zaqar/tests/unit/storage/test_utils.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2017 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import mock - -from zaqar.storage import utils -from zaqar import tests as testing - - -class StorageUtilsTest(testing.TestBase): - config_file = 'wsgi_swift.conf' - - def test_can_connect(self): - swift_uri = "swift://zaqar:password@/service" - is_alive_path = 'zaqar.storage.swift.driver.DataDriver.is_alive' - with mock.patch(is_alive_path) as is_alive: - is_alive.return_value = True - self.assertTrue(utils.can_connect(swift_uri, self.conf)) diff --git a/zaqar/tests/unit/test_bootstrap.py b/zaqar/tests/unit/test_bootstrap.py deleted file mode 100644 index db3ecda2..00000000 --- a/zaqar/tests/unit/test_bootstrap.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
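test_can_connect above never talks to a real Swift endpoint: it patches DataDriver.is_alive by its dotted path, so only the wiring from URI to driver to health check is exercised. A rough sketch of that contract under stated assumptions; the loader helper here is hypothetical, not the actual zaqar.storage.utils code:

    import collections

    FakeDriver = collections.namedtuple('FakeDriver', 'is_alive')


    def _load_driver_for(uri, conf):
        # Hypothetical stand-in for driver loading; always healthy here.
        return FakeDriver(is_alive=lambda: True)


    def can_connect(uri, conf):
        # Resolve a driver for the URI, then delegate to its health
        # check; this delegation is all the mocked test verifies.
        try:
            return _load_driver_for(uri, conf).is_alive()
        except Exception:
            return False


    assert can_connect('swift://zaqar:password@/service', conf=None)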
- -from zaqar import bootstrap -from zaqar.common import errors -from zaqar.storage import pooling -from zaqar.tests import base -from zaqar.tests import helpers -from zaqar.transport import websocket -from zaqar.transport import wsgi - - -class TestBootstrap(base.TestBase): - - def _bootstrap(self, conf_file): - conf_file = helpers.override_mongo_conf(conf_file, self) - self.conf = self.load_conf(conf_file) - return bootstrap.Bootstrap(self.conf) - - def test_storage_invalid(self): - bootstrap = self._bootstrap('drivers_storage_invalid.conf') - self.assertRaises(errors.InvalidDriver, - lambda: bootstrap.storage) - - def test_storage_mongodb_pooled(self): - """Makes sure we can load the pool driver.""" - bootstrap = self._bootstrap('wsgi_mongodb_pooled.conf') - self.assertIsInstance(bootstrap.storage._storage, pooling.DataDriver) - - def test_transport_invalid(self): - bootstrap = self._bootstrap('drivers_transport_invalid.conf') - self.assertRaises(errors.InvalidDriver, - lambda: bootstrap.transport) - - def test_transport_wsgi(self): - bootstrap = self._bootstrap('wsgi_mongodb.conf') - self.assertIsInstance(bootstrap.transport, wsgi.Driver) - - def test_transport_websocket(self): - bootstrap = self._bootstrap('websocket_mongodb.conf') - self.assertIsInstance(bootstrap.transport, websocket.Driver) diff --git a/zaqar/tests/unit/transport/__init__.py b/zaqar/tests/unit/transport/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/transport/test_acl.py b/zaqar/tests/unit/transport/test_acl.py deleted file mode 100644 index 470d3ef0..00000000 --- a/zaqar/tests/unit/transport/test_acl.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2015 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
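TestBootstrap above wraps the attribute access in a lambda (assertRaises(errors.InvalidDriver, lambda: bootstrap.storage)) because the driver is loaded lazily: construction never fails, and the error can only surface on first property access, which assertRaises must be able to trigger itself. A minimal sketch of the pattern; this is not zaqar's actual Bootstrap, which uses a lazy-property decorator:

    class InvalidDriver(Exception):
        pass


    class Bootstrap(object):
        def __init__(self, conf):
            self._conf = conf  # nothing is validated yet

        @property
        def storage(self):
            # Loading (and failing) happens on first access only.
            if self._conf.get('storage') != 'mongodb':
                raise InvalidDriver(self._conf.get('storage'))
            return object()  # a real driver instance in practice


    boot = Bootstrap({'storage': 'bogus'})  # does not raise

    try:
        boot.storage  # the lambda defers exactly this access
    except InvalidDriver:
        pass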
- -from collections import namedtuple - -from oslo_policy import policy - -from zaqar import context -from zaqar.tests import base -from zaqar.transport import acl -from zaqar.transport.wsgi import errors - - -class TestAcl(base.TestBase): - - def setUp(self): - super(TestAcl, self).setUp() - ctx = context.RequestContext() - request_class = namedtuple("Request", ("env",)) - self.request = request_class({"zaqar.context": ctx}) - - def _set_policy(self, json): - acl.setup_policy(self.conf) - rules = policy.Rules.load_json(json) - acl.ENFORCER.set_rules(rules, use_conf=False) - - def test_policy_allow(self): - @acl.enforce("queues:get_all") - def test(ign, request): - pass - - json = '{"queues:get_all": ""}' - self._set_policy(json) - - test(None, self.request) - - def test_policy_deny(self): - @acl.enforce("queues:get_all") - def test(ign, request): - pass - - json = '{"queues:get_all": "!"}' - self._set_policy(json) - - self.assertRaises(errors.HTTPForbidden, test, None, self.request) diff --git a/zaqar/tests/unit/transport/websocket/__init__.py b/zaqar/tests/unit/transport/websocket/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/transport/websocket/base.py b/zaqar/tests/unit/transport/websocket/base.py deleted file mode 100644 index 54539106..00000000 --- a/zaqar/tests/unit/transport/websocket/base.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -from oslo_serialization import jsonutils - -from zaqar import bootstrap -from zaqar.common import configs -from zaqar import tests as testing -from zaqar.transport import validation -from zaqar.transport.websocket import driver - - -class TestBase(testing.TestBase): - - config_file = None - - def setUp(self): - super(TestBase, self).setUp() - - if not self.config_file: - self.skipTest("No config specified") - - self.conf.register_opts(configs._GENERAL_OPTIONS) - self.conf.register_opts(validation._TRANSPORT_LIMITS_OPTIONS, - group=validation._TRANSPORT_LIMITS_GROUP) - self.transport_cfg = self.conf[validation._TRANSPORT_LIMITS_GROUP] - - self.conf.register_opts(driver._WS_OPTIONS, - group=driver._WS_GROUP) - self.ws_cfg = self.conf[driver._WS_GROUP] - - self.conf.unreliable = True - self.conf.admin_mode = True - self.boot = bootstrap.Bootstrap(self.conf) - self.addCleanup(self.boot.storage.close) - self.addCleanup(self.boot.control.close) - - self.transport = self.boot.transport - self.api = self.boot.api - - def tearDown(self): - if self.conf.pooling: - self.boot.control.pools_controller.drop_all() - self.boot.control.catalogue_controller.drop_all() - super(TestBase, self).tearDown() - - -class TestBaseFaulty(TestBase): - """This test ensures we aren't letting any exceptions go unhandled.""" - - -class V1Base(TestBase): - """Base class for V1 API Tests. - - Should contain methods specific to V1 of the API - """ - pass - - -class V1BaseFaulty(TestBaseFaulty): - """Base class for V1 API Faulty Tests. 
- - Should contain methods specific to V1 exception testing - """ - pass - - -class V1_1Base(TestBase): - """Base class for V1.1 API Tests. - - Should contain methods specific to V1.1 of the API - """ - - def _empty_message_list(self, body): - self.assertEqual([], jsonutils.loads(body[0])['messages']) - - -class V1_1BaseFaulty(TestBaseFaulty): - """Base class for V1.1 API Faulty Tests. - - Should contain methods specific to V1.1 exception testing - """ - pass - - -class V2Base(V1_1Base): - """Base class for V2 API Tests. - - Should contain methods specific to V2 of the API - """ - - -class V2BaseFaulty(V1_1BaseFaulty): - """Base class for V2 API Faulty Tests. - - Should contain methods specific to V2 exception testing - """ diff --git a/zaqar/tests/unit/transport/websocket/test_protocol.py b/zaqar/tests/unit/transport/websocket/test_protocol.py deleted file mode 100644 index 1c56695f..00000000 --- a/zaqar/tests/unit/transport/websocket/test_protocol.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2016 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import ddt -import mock - -from oslo_utils import uuidutils -from zaqar.tests.unit.transport.websocket import base -from zaqar.tests.unit.transport.websocket import utils as test_utils - - -@ddt.ddt -class TestMessagingProtocol(base.TestBase): - config_file = "websocket_mongodb.conf" - - def setUp(self): - super(TestMessagingProtocol, self).setUp() - self.protocol = self.transport.factory() - self.project_id = 'protocol-test' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - - def test_on_message_with_invalid_input(self): - payload = u'\ufeff' - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - self.protocol.onMessage(payload, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - payload = "123" - - self.protocol.onMessage(payload, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - def test_on_message_with_invalid_input_binary(self): - dumps, loads, create_req = test_utils.get_pack_tools(binary=True) - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - # Test error response, when the request can't be deserialized. - req = "123" - self.protocol.onMessage(req, True) - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - self.assertIn('Can\'t decode binary', resp['body']['error']) - - # Test error response, when request body is not a dictionary. - req = dumps("Apparently, I'm not a dictionary") - self.protocol.onMessage(req, True) - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - self.assertIn('Unexpected body type. Expected dict', - resp['body']['error']) - - # Test error response, when validation fails. 
- action = 'queue_glorify' - body = {} - req = create_req(action, body, self.headers) - self.protocol.onMessage(req, True) - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - self.assertEqual('queue_glorify is not a valid action', - resp['body']['error']) - - @ddt.data(True, False) - def test_on_message_with_input_in_different_format(self, in_binary): - dumps, loads, create_req = test_utils.get_pack_tools(binary=in_binary) - action = 'queue_get' - body = {'queue_name': 'beautiful-non-existing-queue'} - req = create_req(action, body, self.headers) - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - self.protocol.onMessage(req, in_binary) - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) diff --git a/zaqar/tests/unit/transport/websocket/utils.py b/zaqar/tests/unit/transport/websocket/utils.py deleted file mode 100644 index 0bfa5115..00000000 --- a/zaqar/tests/unit/transport/websocket/utils.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import functools -import json -import msgpack - - -def create_request(action, body, headers): - return json.dumps({"action": action, "body": body, "headers": headers}) - - -def create_binary_request(action, body, headers): - return msgpack.packb({"action": action, "body": body, "headers": headers}) - - -def get_pack_tools(binary=None): - """Get serialization tools for testing websocket transport. - - :param bool binary: type of serialization tools. - True: binary (MessagePack) tools. - False: text (JSON) tools. - :returns: set of serialization tools needed for testing websocket - transport: (dumps, loads, create_request_function) - :rtype: tuple - """ - if binary is None: - raise Exception("binary param is unspecified") - if binary: - dumps = msgpack.Packer(encoding='utf-8', use_bin_type=False).pack - loads = functools.partial(msgpack.unpackb, encoding='utf-8') - create_request_function = create_binary_request - else: - dumps = json.dumps - loads = json.loads - create_request_function = create_request - return dumps, loads, create_request_function diff --git a/zaqar/tests/unit/transport/websocket/v2/__init__.py b/zaqar/tests/unit/transport/websocket/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/transport/websocket/v2/test_auth.py b/zaqar/tests/unit/transport/websocket/v2/test_auth.py deleted file mode 100644 index 5be432c2..00000000 --- a/zaqar/tests/unit/transport/websocket/v2/test_auth.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json - -import ddt -from keystonemiddleware import auth_token -import mock - -from oslo_utils import uuidutils -from zaqar.common import consts -from zaqar.common import urls -from zaqar.tests.unit.transport.websocket import base -from zaqar.tests.unit.transport.websocket import utils as test_utils - - -@ddt.ddt -class AuthTest(base.V2Base): - config_file = "websocket_mongodb_keystone_auth.conf" - - def setUp(self): - super(AuthTest, self).setUp() - self.protocol = self.transport.factory() - self.protocol.factory._secret_key = 'secret' - - self.default_message_ttl = 3600 - - self.project_id = '7e55e1a7e' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - auth_mock = mock.patch.object(auth_token.AuthProtocol, '__call__') - self.addCleanup(auth_mock.stop) - self.auth = auth_mock.start() - self.env = {'keystone.token_info': { - 'token': {'expires_at': '2035-08-05T15:16:33.603700+00:00'}}} - - def test_post(self): - headers = self.headers.copy() - headers['X-Auth-Token'] = 'mytoken1' - req = json.dumps({'action': 'authenticate', 'headers': headers}) - - msg_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(msg_mock.stop) - msg_mock = msg_mock.start() - self.protocol.onMessage(req, False) - - # Didn't send the response yet - self.assertEqual(0, msg_mock.call_count) - - self.assertEqual(1, self.auth.call_count) - responses = [] - self.protocol._auth_start(self.env, lambda x, y: responses.append(x)) - - self.assertEqual(1, len(responses)) - self.assertEqual('200 OK', responses[0]) - - # Check that the env is available to future requests - req = json.dumps({'action': consts.MESSAGE_LIST, - 'body': {'queue_name': 'myqueue'}, - 'headers': self.headers}) - process_request = mock.patch.object(self.protocol._handler, - 'process_request').start() - process_request.return_value = self.protocol._handler.create_response( - 200, {}) - self.protocol.onMessage(req, False) - self.assertEqual(1, process_request.call_count) - self.assertEqual(self.env, process_request.call_args[0][0]._env) - - def test_post_between_auth(self): - headers = self.headers.copy() - headers['X-Auth-Token'] = 'mytoken1' - req = json.dumps({'action': 'authenticate', 'headers': headers}) - - msg_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(msg_mock.stop) - msg_mock = msg_mock.start() - self.protocol.onMessage(req, False) - - req = test_utils.create_request(consts.QUEUE_LIST, {}, self.headers) - self.protocol.onMessage(req, False) - - self.assertEqual(1, msg_mock.call_count) - resp = json.loads(msg_mock.call_args[0][0]) - self.assertEqual(403, resp['headers']['status']) - - def test_failed_auth(self): - msg_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(msg_mock.stop) - msg_mock = msg_mock.start() - self.protocol._auth_in_binary = False - self.protocol._auth_response('401 error', 'Failed') - self.assertEqual(1, msg_mock.call_count) - resp = json.loads(msg_mock.call_args[0][0]) - self.assertEqual(401, resp['headers']['status']) - self.assertEqual('authenticate', resp['request']['action']) - - 
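In the tests above, `lambda x, y: responses.append(x)` stands in for a WSGI start_response(status, response_headers) callable; _auth_start reports the middleware's verdict ('200 OK' or '401 error') through it. A minimal, self-contained illustration of that contract:

    def auth_app(environ, start_response):
        # Stand-in for the keystone middleware pipeline: report the
        # outcome through the WSGI start_response callable.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'']


    responses = []
    auth_app({}, lambda status, headers: responses.append(status))
    assert responses == ['200 OK']  # mirrors the assertion in test_post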
def test_reauth(self): - headers = self.headers.copy() - headers['X-Auth-Token'] = 'mytoken1' - req = json.dumps({'action': 'authenticate', 'headers': headers}) - - msg_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(msg_mock.stop) - msg_mock = msg_mock.start() - self.protocol.onMessage(req, False) - - self.assertEqual(1, self.auth.call_count) - responses = [] - self.protocol._auth_start(self.env, lambda x, y: responses.append(x)) - - self.assertEqual(1, len(responses)) - handle = self.protocol._deauth_handle - self.assertIsNotNone(handle) - - headers = self.headers.copy() - headers['X-Auth-Token'] = 'mytoken2' - req = json.dumps({'action': 'authenticate', 'headers': headers}) - self.protocol.onMessage(req, False) - self.protocol._auth_start(self.env, lambda x, y: responses.append(x)) - - self.assertNotEqual(handle, self.protocol._deauth_handle) - self.assertEqual(2, len(responses)) - self.assertIn('cancelled', repr(handle)) - self.assertNotIn('cancelled', repr(self.protocol._deauth_handle)) - - def test_reauth_after_auth_failure(self): - headers = self.headers.copy() - headers['X-Auth-Token'] = 'wrong_token' - req = json.dumps({'action': 'authenticate', 'headers': headers}) - - msg_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(msg_mock.stop) - msg_mock = msg_mock.start() - # After an authentication failure, _auth_app will be None and the - # request will raise a 401 error. - self.protocol.onMessage(req, False) - self.protocol._auth_response('401 error', 'Failed') - resp = json.loads(msg_mock.call_args[0][0]) - - self.assertEqual(401, resp['headers']['status']) - self.assertEqual('authenticate', resp['request']['action']) - self.assertIsNone(self.protocol._auth_app) - - # Try to authenticate again; "onMessage" should not return 403 - # because _auth_app was cleared after the auth failure. - headers['X-Auth-Token'] = 'mytoken' - req = json.dumps({'action': 'authenticate', 'headers': headers}) - self.protocol.onMessage(req, False) - - self.protocol._auth_response('200 OK', 'authenticate success') - resp = json.loads(msg_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - @ddt.data(True, False) - def test_auth_response_serialization_format(self, in_binary): - dumps, loads, create_req = test_utils.get_pack_tools(binary=in_binary) - headers = self.headers.copy() - headers['X-Auth-Token'] = 'mytoken1' - req = create_req("authenticate", {}, headers) - - msg_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(msg_mock.stop) - msg_mock = msg_mock.start() - # Depending on the onMessage method's second argument, the auth - # response should be in binary or text format. 
- self.protocol.onMessage(req, in_binary) - self.assertEqual(in_binary, self.protocol._auth_in_binary) - self.protocol._auth_response('401 error', 'Failed') - self.assertEqual(1, msg_mock.call_count) - resp = loads(msg_mock.call_args[0][0]) - self.assertEqual(401, resp['headers']['status']) - - def test_signed_url(self): - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - data = urls.create_signed_url( - 'secret', ['/v2/queues/myqueue/messages'], project=self.project_id, - methods=['GET']) - - headers = self.headers.copy() - headers.update({ - 'URL-Signature': data['signature'], - 'URL-Expires': data['expires'], - 'URL-Methods': ['GET'], - 'URL-Paths': ['/v2/queues/myqueue/messages'] - }) - req = json.dumps({'action': consts.MESSAGE_LIST, - 'body': {'queue_name': 'myqueue'}, - 'headers': headers}) - self.protocol.onMessage(req, False) - - self.assertEqual(1, send_mock.call_count) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - def test_signed_url_wrong_queue(self): - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - data = urls.create_signed_url( - 'secret', ['/v2/queues/myqueue/messages'], project=self.project_id, - methods=['GET']) - - headers = self.headers.copy() - headers.update({ - 'URL-Signature': data['signature'], - 'URL-Expires': data['expires'], - 'URL-Methods': ['GET'], - 'URL-Paths': ['/v2/queues/otherqueue/messages'] - }) - req = json.dumps({'action': consts.MESSAGE_LIST, - 'body': {'queue_name': 'otherqueue'}, - 'headers': headers}) - self.protocol.onMessage(req, False) - - self.assertEqual(1, send_mock.call_count) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(403, resp['headers']['status']) - - def test_signed_url_wrong_method(self): - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - data = urls.create_signed_url( - 'secret', ['/v2/queues/myqueue/messages'], project=self.project_id, - methods=['GET']) - - headers = self.headers.copy() - headers.update({ - 'URL-Signature': data['signature'], - 'URL-Expires': data['expires'], - 'URL-Methods': ['GET'], - 'URL-Paths': ['/v2/queues/myqueue/messages'] - }) - req = json.dumps({'action': consts.MESSAGE_DELETE, - 'body': {'queue_name': 'myqueue', - 'message_id': '123'}, - 'headers': headers}) - self.protocol.onMessage(req, False) - - self.assertEqual(1, send_mock.call_count) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(403, resp['headers']['status']) diff --git a/zaqar/tests/unit/transport/websocket/v2/test_claims.py b/zaqar/tests/unit/transport/websocket/v2/test_claims.py deleted file mode 100644 index 5fb8bd26..00000000 --- a/zaqar/tests/unit/transport/websocket/v2/test_claims.py +++ /dev/null @@ -1,439 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
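The signed-URL tests above pin down the contract: create_signed_url binds a secret key to specific paths, methods, a project and an expiry, and the server returns 403 for any request whose path or method falls outside what was signed. A hedged sketch of such a verifier; the canonicalization, separator and hash choice are assumptions, not the actual zaqar.common.urls implementation:

    import hashlib
    import hmac


    def sign(key, paths, methods, project, expires):
        # Assumed canonical form; signer and verifier must agree on it.
        msg = '\n'.join([expires, ','.join(methods),
                         ','.join(paths), project])
        return hmac.new(key.encode(), msg.encode(),
                        hashlib.sha256).hexdigest()


    def verify(key, headers, project, req_path, req_method):
        expected = sign(key, headers['URL-Paths'], headers['URL-Methods'],
                        project, headers['URL-Expires'])
        return (hmac.compare_digest(expected, headers['URL-Signature'])
                and req_path in headers['URL-Paths']       # wrong queue
                and req_method in headers['URL-Methods'])  # wrong method


    headers = {'URL-Paths': ['/v2/queues/myqueue/messages'],
               'URL-Methods': ['GET'],
               'URL-Expires': '2035-01-01T00:00:00'}
    headers['URL-Signature'] = sign('secret', headers['URL-Paths'],
                                    headers['URL-Methods'], '7e55e1a7e',
                                    headers['URL-Expires'])
    assert verify('secret', headers, '7e55e1a7e',
                  '/v2/queues/myqueue/messages', 'GET')
    assert not verify('secret', headers, '7e55e1a7e',
                      '/v2/queues/myqueue/messages', 'DELETE')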
- -import json - -import ddt -import mock -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from zaqar.common import consts -from zaqar.tests.unit.transport.websocket import base -from zaqar.tests.unit.transport.websocket import utils as test_utils - - -@ddt.ddt -class ClaimsBaseTest(base.V1_1Base): - - config_file = "websocket_mongodb.conf" - - def setUp(self): - super(ClaimsBaseTest, self).setUp() - self.protocol = self.transport.factory() - self.defaults = self.api.get_defaults() - - self.project_id = '7e55e1a7e' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - - action = consts.QUEUE_CREATE - body = {"queue_name": "skittle"} - req = test_utils.create_request(action, body, self.headers) - - with mock.patch.object(self.protocol, 'sendMessage') as msg_mock: - self.protocol.onMessage(req, False) - resp = json.loads(msg_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - - action = consts.MESSAGE_POST - body = {"queue_name": "skittle", - "messages": [ - {'body': 239, 'ttl': 300}, - {'body': {'key_1': 'value_1'}, 'ttl': 300}, - {'body': [1, 3], 'ttl': 300}, - {'body': 439, 'ttl': 300}, - {'body': {'key_2': 'value_2'}, 'ttl': 300}, - {'body': ['a', 'b'], 'ttl': 300}, - {'body': 639, 'ttl': 300}, - {'body': {'key_3': 'value_3'}, 'ttl': 300}, - {'body': ["aa", "bb"], 'ttl': 300}] - } - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - - def tearDown(self): - super(ClaimsBaseTest, self).tearDown() - action = consts.QUEUE_DELETE - body = {'queue_name': 'skittle'} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - @ddt.data('[', '[]', '.', '"fail"') - def test_bad_claim(self, doc): - action = consts.CLAIM_CREATE - body = doc - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - action = consts.CLAIM_UPDATE - body = doc - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - def test_exceeded_claim(self): - action = consts.CLAIM_CREATE - body = {"queue_name": "skittle", - "ttl": 100, - "grace": 60, - "limit": 21} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - @ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60)) - def test_unacceptable_ttl_or_grace(self, ttl_grace): - ttl, grace = ttl_grace - action = consts.CLAIM_CREATE - body = {"queue_name": "skittle", - "ttl": ttl, - "grace": grace} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = 
json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - @ddt.data(-1, 59, 43201) - def test_unacceptable_new_ttl(self, ttl): - claim = self._get_a_claim() - - action = consts.CLAIM_UPDATE - body = {"queue_name": "skittle", - "claim_id": claim['body']['claim_id'], - "ttl": ttl} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - def test_default_ttl_and_grace(self): - action = consts.CLAIM_CREATE - body = {"queue_name": "skittle"} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - - action = consts.CLAIM_GET - body = {"queue_name": "skittle", - "claim_id": resp['body']['claim_id']} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(self.defaults.claim_ttl, resp['body']['ttl']) - - def test_lifecycle(self): - # First, claim some messages - action = consts.CLAIM_CREATE - body = {"queue_name": "skittle", - "ttl": 100, - "grace": 60} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - claimed_messages = resp['body']['messages'] - claim_id = resp['body']['claim_id'] - - # No more messages to claim - body = {"queue_name": "skittle", - "ttl": 100, - "grace": 60} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - # Listing messages, by default, won't include claimed, will echo - action = consts.MESSAGE_LIST - body = {"queue_name": "skittle", - "echo": True} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual([], resp['body']['messages']) - - # Listing messages, by default, won't include claimed, won't echo - - body = {"queue_name": "skittle", - "echo": False} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual([], resp['body']['messages']) - - # List messages, include_claimed, but don't echo - - body = {"queue_name": "skittle", - "include_claimed": True, - "echo": False} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(resp['body']['messages'], []) - - # List messages with a different client-id and echo=false. 
- # Should return some messages - - body = {"queue_name": "skittle", - "echo": False} - - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - # Include claimed messages this time, and echo - - body = {"queue_name": "skittle", - "include_claimed": True, - "echo": True} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(len(claimed_messages), len(resp['body']['messages'])) - - message_id_1 = resp['body']['messages'][0]['id'] - message_id_2 = resp['body']['messages'][1]['id'] - - # Try to delete the message without submitting a claim_id - action = consts.MESSAGE_DELETE - body = {"queue_name": "skittle", - "message_id": message_id_1} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(403, resp['headers']['status']) - - # Delete the message and its associated claim - body = {"queue_name": "skittle", - "message_id": message_id_1, - "claim_id": claim_id} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - # Try to get it from the wrong project - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'someproject' - } - - action = consts.MESSAGE_GET - body = {"queue_name": "skittle", - "message_id": message_id_2} - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(404, resp['headers']['status']) - - # Get the message - action = consts.MESSAGE_GET - body = {"queue_name": "skittle", - "message_id": message_id_2} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - # Update the claim - creation = timeutils.utcnow() - action = consts.CLAIM_UPDATE - body = {"queue_name": "skittle", - "ttl": 60, - "grace": 60, - "claim_id": claim_id} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - # Get the claimed messages (again) - action = consts.CLAIM_GET - body = {"queue_name": "skittle", - "claim_id": claim_id} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - query = timeutils.utcnow() - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(60, resp['body']['ttl']) - - message_id_3 = resp['body']['messages'][0]['id'] - - estimated_age = timeutils.delta_seconds(creation, query) - # The claim's age should be 0 at this moment. But in some unexpected - # case, such as slow test, the age maybe larger than 0. Just skip - # asserting if so. 
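[Editor's note] As a crib sheet for the surrounding test_lifecycle, the claim state machine it walks can be condensed to the sequence of actions and asserted statuses below. Action strings follow zaqar.common.consts naming, and bracketed values are placeholders for ids returned by earlier steps:

    # (action, extra body fields, expected status), in test order.
    CLAIM_LIFECYCLE = [
        ('claim_create',   {'ttl': 100, 'grace': 60},                    201),
        ('claim_create',   {'ttl': 100, 'grace': 60},                    204),  # queue drained
        ('message_delete', {'message_id': '<msg>'},                      403),  # claimed: claim_id required
        ('message_delete', {'message_id': '<msg>', 'claim_id': '<cl>'},  204),
        ('claim_update',   {'claim_id': '<cl>', 'ttl': 60, 'grace': 60}, 204),
        ('claim_get',      {'claim_id': '<cl>'},                         200),
        ('claim_delete',   {'claim_id': '<cl>'},                         204),
        ('claim_get',      {'claim_id': '<cl>'},                         404),  # gone after delete
        ('claim_update',   {'claim_id': '<cl>', 'ttl': 60, 'grace': 60}, 404),
    ]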
- if resp['body']['age'] == 0: - self.assertGreater(estimated_age, resp['body']['age']) - - # Delete the claim - action = consts.CLAIM_DELETE - body = {"queue_name": "skittle", - "claim_id": claim_id} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - # Try to delete a message with an invalid claim ID - action = consts.MESSAGE_DELETE - body = {"queue_name": "skittle", - "message_id": message_id_3, - "claim_id": claim_id} - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - # Make sure it wasn't deleted! - action = consts.MESSAGE_GET - body = {"queue_name": "skittle", - "message_id": message_id_2} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - # Try to get a claim that doesn't exist - action = consts.CLAIM_GET - body = {"queue_name": "skittle", - "claim_id": claim_id} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(404, resp['headers']['status']) - - # Try to update a claim that doesn't exist - action = consts.CLAIM_UPDATE - body = {"queue_name": "skittle", - "ttl": 60, - "grace": 60, - "claim_id": claim_id} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(404, resp['headers']['status']) - - def test_post_claim_nonexistent_queue(self): - action = consts.CLAIM_CREATE - body = {"queue_name": "nonexistent", - "ttl": 100, - "grace": 60} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - def test_get_claim_nonexistent_queue(self): - action = consts.CLAIM_GET - body = {"queue_name": "nonexistent", - "claim_id": "aaabbbba"} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(404, resp['headers']['status']) - - def _get_a_claim(self): - action = consts.CLAIM_CREATE - body = {"queue_name": "skittle", - "ttl": 100, - "grace": 60} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - - return resp diff --git a/zaqar/tests/unit/transport/websocket/v2/test_messages.py b/zaqar/tests/unit/transport/websocket/v2/test_messages.py deleted file mode 100644 index dad982e1..00000000 --- a/zaqar/tests/unit/transport/websocket/v2/test_messages.py +++ /dev/null @@ -1,610 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import json - -import ddt -import mock -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from testtools import matchers - -from zaqar.common import consts -from zaqar.tests.unit.transport.websocket import base -from zaqar.tests.unit.transport.websocket import utils as test_utils -from zaqar.transport import validation - - -@ddt.ddt -class MessagesBaseTest(base.V2Base): - - config_file = "websocket_mongodb.conf" - - def setUp(self): - super(MessagesBaseTest, self).setUp() - self.protocol = self.transport.factory() - - self.default_message_ttl = 3600 - - self.project_id = '7e55e1a7e' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - - body = {"queue_name": "kitkat"} - req = test_utils.create_request(consts.QUEUE_CREATE, - body, self.headers) - - with mock.patch.object(self.protocol, 'sendMessage') as msg_mock: - self.protocol.onMessage(req, False) - resp = json.loads(msg_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - - def tearDown(self): - super(MessagesBaseTest, self).tearDown() - body = {"queue_name": "kitkat"} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(consts.QUEUE_DELETE, - body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - def _test_post(self, sample_messages, in_binary=False): - body = {"queue_name": "kitkat", - "messages": sample_messages} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - dumps, loads, create_req = test_utils.get_pack_tools(binary=in_binary) - - req = create_req(consts.MESSAGE_POST, body, self.headers) - - self.protocol.onMessage(req, in_binary) - - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - self.msg_ids = resp['body']['message_ids'] - self.assertEqual(len(sample_messages), len(self.msg_ids)) - - lookup = dict([(m['ttl'], m['body']) for m in sample_messages]) - - # Test GET on the message resource directly - # NOTE(cpp-cabrera): force the passing of time to age a message - timeutils_utcnow = 'oslo_utils.timeutils.utcnow' - now = timeutils.utcnow() + datetime.timedelta(seconds=10) - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - for msg_id in self.msg_ids: - headers = self.headers.copy() - headers['X-Project-ID'] = '777777' - # Wrong project ID - action = consts.MESSAGE_GET - body = {"queue_name": "kitkat", - "message_id": msg_id} - - req = create_req(action, body, headers) - - self.protocol.onMessage(req, in_binary) - - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(404, resp['headers']['status']) - - # Correct project ID - req = create_req(action, body, self.headers) - - self.protocol.onMessage(req, in_binary) - - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - # Check message properties - message = resp['body']['messages'] - self.assertEqual(lookup[message['ttl']], message['body']) - 
self.assertEqual(msg_id, message['id']) - - # no negative age - # NOTE(cpp-cabrera): testtools lacks - # GreaterThanEqual on py26 - self.assertThat(message['age'], - matchers.GreaterThan(-1)) - - # Test bulk GET - action = consts.MESSAGE_GET_MANY - body = {"queue_name": "kitkat", - "message_ids": self.msg_ids} - req = create_req(action, body, self.headers) - - self.protocol.onMessage(req, in_binary) - - resp = loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - expected_ttls = set(m['ttl'] for m in sample_messages) - actual_ttls = set(m['ttl'] for m in resp['body']['messages']) - self.assertFalse(expected_ttls - actual_ttls) - actual_ids = set(m['id'] for m in resp['body']['messages']) - self.assertFalse(set(self.msg_ids) - actual_ids) - - def test_exceeded_payloads(self): - # Get a valid message id - resp = self._post_messages("kitkat") - msg_id = resp['body']['message_ids'] - - # Bulk GET restriction - get_msg_ids = msg_id * 21 - action = consts.MESSAGE_GET_MANY - body = {"queue_name": "kitkat", - "message_ids": get_msg_ids} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - # Listing restriction - body['limit'] = 21 - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - # Bulk deletion restriction - del_msg_ids = msg_id * 22 - action = consts.MESSAGE_GET_MANY - body = {"queue_name": "kitkat", - "message_ids": del_msg_ids} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - @ddt.data(True, False) - def test_post_single(self, in_binary): - sample_messages = [ - {'body': {'key': 'value'}, 'ttl': 200}, - ] - - self._test_post(sample_messages, in_binary=in_binary) - - @ddt.data(True, False) - def test_post_multiple(self, in_binary): - sample_messages = [ - {'body': 239, 'ttl': 100}, - {'body': {'key': 'value'}, 'ttl': 200}, - {'body': [1, 3], 'ttl': 300}, - ] - - self._test_post(sample_messages, in_binary=in_binary) - - def test_post_optional_ttl(self): - messages = [{'body': 239}, - {'body': {'key': 'value'}, 'ttl': 200}] - - action = consts.MESSAGE_POST - body = {"queue_name": "kitkat", - "messages": messages} - req = test_utils.create_request(action, body, self.headers) - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(201, resp['headers']['status']) - msg_id = resp['body']['message_ids'][0] - - action = consts.MESSAGE_GET - body = {"queue_name": "kitkat", "message_id": msg_id} - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(self.default_message_ttl, - resp['body']['messages']['ttl']) - - def test_post_to_non_ascii_queue(self): - queue_name = u'non-ascii-n\u0153me' - - if six.PY2: - queue_name = queue_name.encode('utf-8') - - resp = self._post_messages(queue_name) - self.assertEqual(400, resp['headers']['status']) - - def 
test_post_with_long_queue_name(self): - # NOTE(kgriffs): This test verifies that routes with - # embedded queue name params go through the validation - # hook, regardless of the target resource. - - queue_name = 'v' * validation.QUEUE_NAME_MAX_LEN - - resp = self._post_messages(queue_name) - self.assertEqual(201, resp['headers']['status']) - - queue_name += 'v' - resp = self._post_messages(queue_name) - self.assertEqual(400, resp['headers']['status']) - - def test_post_to_missing_queue(self): - queue_name = 'nonexistent' - resp = self._post_messages(queue_name) - self.assertEqual(201, resp['headers']['status']) - - def test_post_invalid_ttl(self): - sample_messages = [ - {'body': {'key': 'value'}, 'ttl': '200'}, - ] - - action = consts.MESSAGE_POST - body = {"queue_name": "kitkat", - "messages": sample_messages} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - send_mock = send_mock.start() - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - self.assertEqual( - 'Bad request. The value of the "ttl" field must be a int.', - resp['body']['exception']) - - def test_post_no_body(self): - sample_messages = [ - {'ttl': 200}, - ] - - action = consts.MESSAGE_POST - body = {"queue_name": "kitkat", - "messages": sample_messages} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - send_mock = send_mock.start() - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - self.assertEqual( - 'Bad request. 
Missing "body" field.', resp['body']['exception']) - - def test_get_from_missing_queue(self): - action = consts.MESSAGE_LIST - body = {"queue_name": "anothernonexistent"} - req = test_utils.create_request(action, body, self.headers) - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual([], resp['body']['messages']) - - @ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369') - def test_bad_client_id(self, text_id): - action = consts.MESSAGE_POST - body = { - "queue_name": "kinder", - "messages": [{"ttl": 60, - "body": ""}] - } - headers = { - 'Client-ID': text_id, - 'X-Project-ID': self.project_id - } - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - action = consts.MESSAGE_GET - body = { - "queue_name": "kinder", - "limit": 3, - "echo": True - } - - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - @ddt.data(None, '[', '[]', '{}', '.') - def test_post_bad_message(self, document): - action = consts.MESSAGE_POST - body = { - "queue_name": "kinder", - "messages": document - } - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - @ddt.data(-1, 59, 1209601) - def test_unacceptable_ttl(self, ttl): - action = consts.MESSAGE_POST - body = {"queue_name": "kinder", - "messages": [{"ttl": ttl, "body": ""}]} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - def test_exceeded_message_posting(self): - # Total (raw request) size - document = [{'body': "some body", 'ttl': 100}] * 8000 - action = consts.MESSAGE_POST - body = { - "queue_name": "kinder", - "messages": document - } - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - @ddt.data('{"overflow": 9223372036854775808}', - '{"underflow": -9223372036854775809}') - def test_unsupported_json(self, document): - action = consts.MESSAGE_POST - body = { - "queue_name": "fizz", - "messages": document - } - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - def test_delete(self): - resp = self._post_messages("tofi") - msg_id = resp['body']['message_ids'][0] - - action = consts.MESSAGE_GET - body = {"queue_name": "tofi", - "message_id": msg_id} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = 
test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - # Delete queue - action = consts.MESSAGE_DELETE - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - # Get non existent queue - action = consts.MESSAGE_GET - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(404, resp['headers']['status']) - - # Safe to delete non-existing ones - action = consts.MESSAGE_DELETE - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - def test_bulk_delete(self): - resp = self._post_messages("nerds", repeat=5) - msg_ids = resp['body']['message_ids'] - - action = consts.MESSAGE_DELETE_MANY - body = {"queue_name": "nerds", - "message_ids": msg_ids} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - action = consts.MESSAGE_GET - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - # Safe to delete non-existing ones - action = consts.MESSAGE_DELETE_MANY - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - # Even after the queue is gone - action = consts.QUEUE_DELETE - body = {"queue_name": "nerds"} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - action = consts.MESSAGE_DELETE_MANY - body = {"queue_name": "nerds", - "message_ids": msg_ids} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - def test_pop_delete(self): - self._post_messages("kitkat", repeat=5) - - action = consts.MESSAGE_DELETE_MANY - body = {"queue_name": "kitkat", "pop": 2} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(2, len(resp['body']['messages'])) - self.assertEqual(239, resp['body']['messages'][0]['body']) - self.assertEqual(239, resp['body']['messages'][1]['body']) - - def test_get_nonexistent_message_404s(self): - action = consts.MESSAGE_GET - body = {"queue_name": "notthere", - "message_id": "a"} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - 
self.assertEqual(404, resp['headers']['status']) - - def test_get_multiple_invalid_messages_404s(self): - action = consts.MESSAGE_GET_MANY - body = {"queue_name": "notnotthere", - "message_ids": ["a", "b", "c"]} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - def test_delete_multiple_invalid_messages_204s(self): - action = consts.MESSAGE_DELETE - body = {"queue_name": "yetanothernotthere", - "message_ids": ["a", "b", "c"]} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(400, resp['headers']['status']) - - def _post_messages(self, queue_name, repeat=1): - messages = [{'body': 239, 'ttl': 300}] * repeat - - action = consts.MESSAGE_POST - body = {"queue_name": queue_name, - "messages": messages} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, self.headers) - - self.protocol.onMessage(req, False) - - return json.loads(send_mock.call_args[0][0]) - - def test_invalid_request(self): - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - self.protocol.onMessage('foo', False) - self.assertEqual(1, send_mock.call_count) - response = json.loads(send_mock.call_args[0][0]) - self.assertIn('error', response['body']) - self.assertEqual({'status': 400}, response['headers']) - self.assertEqual( - {'action': None, 'api': 'v2', 'body': {}, 'headers': {}}, - response['request']) diff --git a/zaqar/tests/unit/transport/websocket/v2/test_queue_lifecycle.py b/zaqar/tests/unit/transport/websocket/v2/test_queue_lifecycle.py deleted file mode 100644 index bd3bb253..00000000 --- a/zaqar/tests/unit/transport/websocket/v2/test_queue_lifecycle.py +++ /dev/null @@ -1,676 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
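[Editor's note] The queue-lifecycle cases below, like the message and claim tests above, all build the same three-field JSON envelope through test_utils.create_request; test_invalid_request above pins the shape by expecting {'action': None, 'api': 'v2', 'body': {}, 'headers': {}} echoed back for a malformed frame. A standalone sketch of the builder, for reference:

    import json
    import uuid

    def make_request(action, body, project_id):
        # The envelope the websocket transport expects: identity travels
        # inside 'headers', since websocket frames carry no per-request
        # HTTP headers.
        return json.dumps({
            'action': action,                    # e.g. 'queue_create'
            'body': body,                        # action-specific payload
            'headers': {'Client-ID': str(uuid.uuid4()),
                        'X-Project-ID': project_id},
        })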
- -import json - -import ddt -import mock - -from oslo_utils import uuidutils -from zaqar.common import consts -from zaqar.storage import errors as storage_errors -from zaqar import tests as testing -from zaqar.tests.unit.transport.websocket import base -from zaqar.tests.unit.transport.websocket import utils as test_utils - - -@ddt.ddt -class QueueLifecycleBaseTest(base.V2Base): - - config_file = "websocket_mongodb.conf" - - def setUp(self): - super(QueueLifecycleBaseTest, self).setUp() - self.protocol = self.transport.factory() - - def test_empty_project_id(self): - action = consts.QUEUE_CREATE - body = {"queue_name": "kitkat", - "metadata": { - "key": { - "key2": "value", - "key3": [1, 2, 3, 4, 5]} - } - } - headers = {'Client-ID': uuidutils.generate_uuid()} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - with mock.patch.object(self.protocol, 'sendMessage') as msg_mock: - msg_mock.side_effect = validator - self.protocol.onMessage(req, False) - - @ddt.data('480924', 'foo') - def test_basics_thoroughly(self, project_id): - # Stats are empty - queue not created yet - action = consts.QUEUE_GET_STATS - body = {"queue_name": "gummybears"} - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': project_id - } - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(404, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Create - action = consts.QUEUE_CREATE - body = {"queue_name": "gummybears", - "metadata": { - "key": { - "key2": "value", - "key3": [1, 2, 3, 4, 5]}, - "messages": {"ttl": 600}, - } - } - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(201, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Fetch metadata - action = consts.QUEUE_GET - body = {"queue_name": "gummybears"} - meta = {"messages": {"ttl": 600}, - "key": { - "key2": "value", - "key3": [1, 2, 3, 4, 5]} - } - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(meta, resp['body']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Stats empty queue - action = consts.QUEUE_GET_STATS - body = {"queue_name": "gummybears"} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Delete - action = consts.QUEUE_DELETE - body = {"queue_name": "gummybears"} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(204, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Get non-existent stats - action = consts.QUEUE_GET_STATS - body = {"queue_name": "gummybears"} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(404, 
resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def test_name_restrictions(self): - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' - } - action = consts.QUEUE_CREATE - body = {"queue_name": 'marsbar', - "metadata": { - "key": { - "key2": "value", - "key3": [1, 2, 3, 4, 5]}, - "messages": {"ttl": 600}, - } - } - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(201, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - body["queue_name"] = "m@rsb@r" - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - body["queue_name"] = "marsbar" * 10 - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - - def test_project_id_restriction(self): - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' * 30 - } - action = consts.QUEUE_CREATE - body = {"queue_name": 'poptart'} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - headers['X-Project-ID'] = 'test-project' - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(201, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def test_non_ascii_name(self): - test_params = ((u'/queues/non-ascii-n\u0153me', 'utf-8'), - (u'/queues/non-ascii-n\xc4me', 'iso8859-1')) - - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' * 30 - } - action = consts.QUEUE_CREATE - body = {"queue_name": test_params[0]} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - body = {"queue_name": test_params[1]} - req = test_utils.create_request(action, body, headers) - - self.protocol.onMessage(req, False) - - def test_no_metadata(self): - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' - } - action = consts.QUEUE_CREATE - body = {"queue_name": "fizbat"} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(201, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(204, resp['headers']['status']) - - 
sender.side_effect = validator - self.protocol.onMessage(req, False) - - @ddt.data('{', '[]', '.', ' ') - def test_bad_metadata(self, meta): - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' * 30 - } - action = consts.QUEUE_CREATE - body = {"queue_name": "fizbat", - "metadata": meta} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def test_too_much_metadata(self): - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' - } - action = consts.QUEUE_CREATE - body = {"queue_name": "buttertoffee", - "metadata": {"messages": {"ttl": 600}, - "padding": "x"} - } - - max_size = self.transport_cfg.max_queue_metadata - body["metadata"]["padding"] = "x" * max_size - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def test_way_too_much_metadata(self): - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' - } - action = consts.QUEUE_CREATE - body = {"queue_name": "peppermint", - "metadata": {"messages": {"ttl": 600}, - "padding": "x"} - } - - max_size = self.transport_cfg.max_queue_metadata - body["metadata"]["padding"] = "x" * max_size * 5 - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def test_update_metadata(self): - self.skip("Implement patch method") - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' - } - action = consts.QUEUE_CREATE - body = {"queue_name": "bonobon"} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - # Create - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(201, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Set meta - meta1 = {"messages": {"ttl": 600}, "padding": "x"} - body["metadata"] = meta1 - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(204, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Get - action = consts.QUEUE_GET - body = {"queue_name": "bonobon"} - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(204, resp['headers']['status']) - self.assertEqual(meta1, resp['body']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Update - action = consts.QUEUE_CREATE - meta2 = {"messages": 
{"ttl": 100}, "padding": "y"} - body["metadata"] = meta2 - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(204, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Get again - action = consts.QUEUE_GET - body = {"queue_name": "bonobon"} - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual(meta2, resp['body']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def test_list(self): - arbitrary_number = 644079696574693 - project_id = str(arbitrary_number) - client_id = uuidutils.generate_uuid() - headers = { - 'X-Project-ID': project_id, - 'Client-ID': client_id - } - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - # NOTE(kgriffs): It's important that this one sort after the one - # above. This is in order to prove that bug/1236605 is fixed, and - # stays fixed! - # NOTE(vkmc): In websockets as well! - alt_project_id = str(arbitrary_number + 1) - - # List empty - action = consts.QUEUE_LIST - body = {} - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual([], resp['body']['queues']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Payload exceeded - body = {'limit': 21} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(400, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # Create some - def create_queue(project_id, queue_name, metadata): - altheaders = {'Client-ID': client_id} - if project_id is not None: - altheaders['X-Project-ID'] = project_id - action = consts.QUEUE_CREATE - body['queue_name'] = queue_name - body['metadata'] = metadata - - req = test_utils.create_request(action, body, altheaders) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(201, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - create_queue(project_id, 'q1', {"node": 31}) - create_queue(project_id, 'q2', {"node": 32}) - create_queue(project_id, 'q3', {"node": 33}) - - create_queue(alt_project_id, 'q3', {"alt": 1}) - - # List (limit) - body = {'limit': 2} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(2, len(resp['body']['queues'])) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # List (no metadata, get all) - body = {'limit': 5} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - # Ensure we didn't pick up the queue from the alt project. 
- self.assertEqual(3, len(resp['body']['queues'])) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # List with metadata - body = {'detailed': True} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - action = consts.QUEUE_GET - body = {"queue_name": "q1"} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - self.assertEqual({"node": 31}, resp['body']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # List tail - action = consts.QUEUE_LIST - body = {} - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(200, resp['headers']['status']) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - # List manually-constructed tail - body = {'marker': "zzz"} - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - - def test_list_returns_503_on_nopoolfound_exception(self): - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'test-project' - } - action = consts.QUEUE_LIST - body = {} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(503, resp['headers']['status']) - - sender.side_effect = validator - - queue_controller = self.boot.storage.queue_controller - - with mock.patch.object(queue_controller, 'list') as mock_queue_list: - - def queue_generator(): - raise storage_errors.NoPoolFound() - - # This generator tries to be like queue controller list generator - # in some ways. 
- def fake_generator(): - yield queue_generator() - yield {} - mock_queue_list.return_value = fake_generator() - self.protocol.onMessage(req, False) - - def _post_messages(self, queue_name, headers, repeat=1): - messages = [{'body': 239, 'ttl': 300}] * repeat - - action = consts.MESSAGE_POST - body = {"queue_name": queue_name, - "messages": messages} - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - - req = test_utils.create_request(action, body, headers) - - self.protocol.onMessage(req, False) - - return json.loads(send_mock.call_args[0][0]) - - def test_purge(self): - arbitrary_number = 644079696574693 - project_id = str(arbitrary_number) - client_id = uuidutils.generate_uuid() - headers = { - 'X-Project-ID': project_id, - 'Client-ID': client_id - } - queue_name = 'myqueue' - resp = self._post_messages(queue_name, headers, repeat=5) - msg_ids = resp['body']['message_ids'] - - send_mock = mock.Mock() - self.protocol.sendMessage = send_mock - for msg_id in msg_ids: - action = consts.MESSAGE_GET - body = {"queue_name": queue_name, "message_id": msg_id} - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(200, resp['headers']['status']) - - action = consts.QUEUE_PURGE - body = {"queue_name": queue_name, "resource_types": ["messages"]} - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(204, resp['headers']['status']) - - for msg_id in msg_ids: - action = consts.MESSAGE_GET - body = {"queue_name": queue_name, "message_id": msg_id} - req = test_utils.create_request(action, body, headers) - self.protocol.onMessage(req, False) - resp = json.loads(send_mock.call_args[0][0]) - self.assertEqual(404, resp['headers']['status']) - - -class TestQueueLifecycleMongoDB(QueueLifecycleBaseTest): - - config_file = 'websocket_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestQueueLifecycleMongoDB, self).setUp() - - def tearDown(self): - storage = self.boot.storage._storage - connection = storage.connection - - connection.drop_database(self.boot.control.queues_database) - - for db in storage.message_databases: - connection.drop_database(db) - - super(TestQueueLifecycleMongoDB, self).tearDown() diff --git a/zaqar/tests/unit/transport/websocket/v2/test_subscriptions.py b/zaqar/tests/unit/transport/websocket/v2/test_subscriptions.py deleted file mode 100644 index 777bef5b..00000000 --- a/zaqar/tests/unit/transport/websocket/v2/test_subscriptions.py +++ /dev/null @@ -1,390 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
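[Editor's note] The subscription tests below reuse the sendMessage-validator idiom seen throughout these files: patch the protocol's outbound hook and assert on each decoded frame. A distilled version of the pattern (a sketch, not the tests' literal helper; the deleted files import the external mock package rather than unittest.mock):

    import json
    from unittest import mock

    def expect_status(testcase, protocol, expected):
        # Install a sendMessage stand-in that decodes every outgoing
        # frame and checks its status code.
        def validator(payload, is_binary):
            testcase.assertEqual(expected,
                                 json.loads(payload)['headers']['status'])
        patcher = mock.patch.object(protocol, 'sendMessage',
                                    side_effect=validator)
        testcase.addCleanup(patcher.stop)
        return patcher.start()

    # Usage inside a test method:
    #     expect_status(self, self.protocol, 201)
    #     self.protocol.onMessage(req, False)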
- -import json -import time - -import mock -import msgpack - -from oslo_utils import uuidutils -from zaqar.common import auth -from zaqar.common import consts -from zaqar.storage import errors as storage_errors -from zaqar.tests.unit.transport.websocket import base -from zaqar.tests.unit.transport.websocket import utils as test_utils -from zaqar.transport.websocket import factory - - -class SubscriptionTest(base.V1_1Base): - - config_file = 'websocket_mongodb_subscriptions.conf' - - def setUp(self): - super(SubscriptionTest, self).setUp() - self.protocol = self.transport.factory() - - self.project_id = '7e55e1a7e' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - - body = {'queue_name': 'kitkat'} - req = test_utils.create_request(consts.QUEUE_CREATE, - body, self.headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(resp['headers']['status'], 201) - - with mock.patch.object(self.protocol, 'sendMessage') as msg_mock: - msg_mock.side_effect = validator - self.protocol.onMessage(req, False) - - def tearDown(self): - super(SubscriptionTest, self).tearDown() - body = {'queue_name': 'kitkat'} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(consts.QUEUE_DELETE, - body, self.headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(resp['headers']['status'], 204) - - sender.side_effect = validator - self.protocol.onMessage(req, False) - - def test_subscription_create(self): - action = consts.SUBSCRIPTION_CREATE - body = {'queue_name': 'kitkat', 'ttl': 600} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - subscription_factory = factory.NotificationFactory(None) - subscription_factory.set_subscription_url('http://localhost:1234/') - self.protocol._handler.set_subscription_factory(subscription_factory) - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - added_age = 1 - time.sleep(added_age) - [subscriber] = list( - next( - self.boot.storage.subscription_controller.list( - 'kitkat', self.project_id))) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'kitkat', - subscriber['id'], project=self.project_id) - self.assertEqual('kitkat', subscriber['source']) - self.assertEqual(600, subscriber['ttl']) - self.assertEqual('http://localhost:1234/%s' % self.protocol.proto_id, - subscriber['subscriber']) - self.assertLessEqual(added_age, subscriber['age']) - - response = { - 'body': {'message': 'Subscription kitkat created.', - 'subscription_id': subscriber['id']}, - 'headers': {'status': 201}, - 'request': {'action': consts.SUBSCRIPTION_CREATE, - 'body': {'queue_name': 'kitkat', 'ttl': 600}, - 'api': 'v2', 'headers': self.headers}} - - self.assertEqual(1, sender.call_count) - self.assertEqual(response, json.loads(sender.call_args[0][0])) - - # Trigger protocol close - self.protocol.onClose(True, 100, None) - subscribers = list( - next( - self.boot.storage.subscription_controller.list( - 'kitkat', self.project_id))) - self.assertEqual([], subscribers) - - @mock.patch.object(auth, 'create_trust_id') - def test_subscription_create_trust(self, create_trust): - create_trust.return_value = 'trust_id' - action = consts.SUBSCRIPTION_CREATE - body = {'queue_name': 'kitkat', 'ttl': 600, - 'subscriber': 'trust+http://example.com'} - 
self.protocol._auth_env = {} - self.protocol._auth_env['X-USER-ID'] = 'user-id' - self.protocol._auth_env['X-ROLES'] = 'my-roles' - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - send_mock.start() - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - [subscriber] = list( - next( - self.boot.storage.subscription_controller.list( - 'kitkat', self.project_id))) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'kitkat', - subscriber['id'], project=self.project_id) - self.assertEqual('trust+http://example.com', - subscriber['subscriber']) - self.assertEqual({'trust_id': 'trust_id'}, subscriber['options']) - - self.assertEqual('user-id', create_trust.call_args[0][1]) - self.assertEqual(self.project_id, create_trust.call_args[0][2]) - self.assertEqual(['my-roles'], create_trust.call_args[0][3]) - - def test_subscription_delete(self): - sub = self.boot.storage.subscription_controller.create( - 'kitkat', '', 600, {}, project=self.project_id) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'kitkat', sub, - project=self.project_id) - action = consts.SUBSCRIPTION_DELETE - body = {'queue_name': 'kitkat', 'subscription_id': str(sub)} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - data = list( - next( - self.boot.storage.subscription_controller.list( - 'kitkat', self.project_id))) - self.assertEqual([], data) - - response = { - 'body': 'Subscription %s removed.' % str(sub), - 'headers': {'status': 204}, - 'request': {'action': consts.SUBSCRIPTION_DELETE, - 'body': {'queue_name': 'kitkat', - 'subscription_id': str(sub)}, - 'api': 'v2', 'headers': self.headers}} - self.assertEqual(1, sender.call_count) - self.assertEqual(response, json.loads(sender.call_args[0][0])) - - def test_subscription_create_no_queue(self): - action = consts.SUBSCRIPTION_CREATE - body = {'queue_name': 'shuffle', 'ttl': 600} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - subscription_factory = factory.NotificationFactory(None) - subscription_factory.set_subscription_url('http://localhost:1234/') - self.protocol._handler.set_subscription_factory(subscription_factory) - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - - [subscriber] = list( - next( - self.boot.storage.subscription_controller.list( - 'shuffle', self.project_id))) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'shuffle', - subscriber['id'], project=self.project_id) - - response = { - 'body': {'message': 'Subscription shuffle created.', - 'subscription_id': subscriber['id']}, - 'headers': {'status': 201}, - 'request': {'action': consts.SUBSCRIPTION_CREATE, - 'body': {'queue_name': 'shuffle', 'ttl': 600}, - 'api': 'v2', 'headers': self.headers}} - - self.assertEqual(1, sender.call_count) - self.assertEqual(response, json.loads(sender.call_args[0][0])) - - def test_subscription_get(self): - sub = self.boot.storage.subscription_controller.create( - 'kitkat', '', 600, {}, project=self.project_id) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'kitkat', sub, - project=self.project_id) - action = consts.SUBSCRIPTION_GET - body = {'queue_name': 'kitkat', 'subscription_id': 
str(sub)} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - - expected_response_without_age = { - 'body': {'subscriber': '', - 'source': 'kitkat', - 'options': {}, - 'id': str(sub), - 'ttl': 600, - 'confirmed': False}, - 'headers': {'status': 200}, - 'request': {'action': consts.SUBSCRIPTION_GET, - 'body': {'queue_name': 'kitkat', - 'subscription_id': str(sub)}, - 'api': 'v2', 'headers': self.headers}} - - self.assertEqual(1, sender.call_count) - response = json.loads(sender.call_args[0][0]) - # Get and remove age from the actual response. - actual_sub_age = response['body'].pop('age') - self.assertLessEqual(0, actual_sub_age) - self.assertEqual(expected_response_without_age, response) - - def test_subscription_list(self): - sub = self.boot.storage.subscription_controller.create( - 'kitkat', '', 600, {}, project=self.project_id) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'kitkat', sub, - project=self.project_id) - action = consts.SUBSCRIPTION_LIST - body = {'queue_name': 'kitkat'} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - - expected_response_without_age = { - 'body': { - 'subscriptions': [{ - 'subscriber': '', - 'source': 'kitkat', - 'options': {}, - 'id': str(sub), - 'ttl': 600, - 'confirmed': False}]}, - 'headers': {'status': 200}, - 'request': {'action': consts.SUBSCRIPTION_LIST, - 'body': {'queue_name': 'kitkat'}, - 'api': 'v2', 'headers': self.headers}} - self.assertEqual(1, sender.call_count) - response = json.loads(sender.call_args[0][0]) - # Get and remove age from the actual response. - actual_sub_age = response['body']['subscriptions'][0].pop('age') - self.assertLessEqual(0, actual_sub_age) - self.assertEqual(expected_response_without_age, response) - - def test_subscription_sustainable_notifications_format(self): - # NOTE(Eva-i): The websocket subscription's notifications must be - # sent in the same format, binary or text, as the format of the - # subscription creation request. - # This test checks that notifications keep their encoding format, even - # if the client suddenly starts sending requests in another format. 
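[Editor's note] The format-stickiness test that follows drives the transport with two codecs for the same envelope: msgpack for binary frames, JSON for text. A simplified sketch of what the tests' get_pack_tools helper plausibly returns -- the real helper in zaqar/tests/unit/transport/websocket/utils.py also returns a request builder as a third element:

    import json
    import msgpack

    def get_pack_tools(binary):
        # One (dumps, loads) pair per wire format. raw=False is the
        # modern msgpack spelling of the encoding='utf-8' argument the
        # deleted test passes to unpackb.
        if binary:
            return (lambda obj: msgpack.packb(obj, use_bin_type=True),
                    lambda data: msgpack.unpackb(data, raw=False))
        return json.dumps, json.loads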
- - # Create a subscription in binary format - action = consts.SUBSCRIPTION_CREATE - body = {'queue_name': 'kitkat', 'ttl': 600} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - subscription_factory = factory.NotificationFactory( - self.transport.factory) - subscription_factory.set_subscription_url('http://localhost:1234/') - self.protocol._handler.set_subscription_factory(subscription_factory) - - req = test_utils.create_binary_request(action, body, self.headers) - self.protocol.onMessage(req, True) - self.assertTrue(self.protocol.notify_in_binary) - - [subscriber] = list( - next( - self.boot.storage.subscription_controller.list( - 'kitkat', self.project_id))) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'kitkat', - subscriber['id'], project=self.project_id) - - # Send a message in text format - webhook_notification_send_mock = mock.patch('requests.post') - self.addCleanup(webhook_notification_send_mock.stop) - webhook_notification_sender = webhook_notification_send_mock.start() - - action = consts.MESSAGE_POST - body = {"queue_name": "kitkat", - "messages": [{'body': {'status': 'disco queen'}, 'ttl': 60}]} - req = test_utils.create_request(action, body, self.headers) - self.protocol.onMessage(req, False) - self.assertTrue(self.protocol.notify_in_binary) - - # Check that the server responded in text format to the message - # creation request - message_create_response = json.loads(sender.call_args_list[1][0][0]) - self.assertEqual(201, message_create_response['headers']['status']) - - # Fetch webhook notification that was intended to arrive to - # notification protocol's listen address. Make subscription factory - # send it as websocket notification to the client - wh_notification = webhook_notification_sender.call_args[1]['data'] - subscription_factory.send_data(wh_notification, self.protocol.proto_id) - - # Check that the server sent the websocket notification in binary - # format - self.assertEqual(3, sender.call_count) - ws_notification = msgpack.unpackb(sender.call_args_list[2][0][0], - encoding='utf-8') - self.assertEqual({'body': {'status': 'disco queen'}, 'ttl': 60, - 'queue_name': 'kitkat', - 'Message_Type': u'Notification'}, ws_notification) - - def test_list_returns_503_on_nopoolfound_exception(self): - sub = self.boot.storage.subscription_controller.create( - 'kitkat', '', 600, {}, project=self.project_id) - self.addCleanup( - self.boot.storage.subscription_controller.delete, 'kitkat', sub, - project=self.project_id) - action = consts.SUBSCRIPTION_LIST - body = {'queue_name': 'kitkat'} - - send_mock = mock.patch.object(self.protocol, 'sendMessage') - self.addCleanup(send_mock.stop) - sender = send_mock.start() - - req = test_utils.create_request(action, body, self.headers) - - def validator(resp, isBinary): - resp = json.loads(resp) - self.assertEqual(503, resp['headers']['status']) - - sender.side_effect = validator - - subscription_controller = self.boot.storage.subscription_controller - - with mock.patch.object(subscription_controller, 'list') as \ - mock_subscription_list: - - def subscription_generator(): - raise storage_errors.NoPoolFound() - - # This generator tries to be like subscription controller list - # generator in some ways. 
- def fake_generator(): - yield subscription_generator() - yield {} - mock_subscription_list.return_value = fake_generator() - self.protocol.onMessage(req, False) diff --git a/zaqar/tests/unit/transport/wsgi/__init__.py b/zaqar/tests/unit/transport/wsgi/__init__.py deleted file mode 100644 index 4ef92ec0..00000000 --- a/zaqar/tests/unit/transport/wsgi/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -# TODO(kgriffs): Consider consolidating all of these tests into a -# single module. - -from zaqar.tests.unit.transport.wsgi import base - -TestBase = base.TestBase -TestBaseFaulty = base.TestBaseFaulty -V1Base = base.V1Base -V1_1Base = base.V1_1Base diff --git a/zaqar/tests/unit/transport/wsgi/base.py b/zaqar/tests/unit/transport/wsgi/base.py deleted file mode 100644 index 2b7dcc91..00000000 --- a/zaqar/tests/unit/transport/wsgi/base.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
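# The 503 test above depends on the fact that a generator body does not run
# until iterated: creating fake_generator() succeeds, and NoPoolFound only
# surfaces when the transport pulls the first item, which the handler then
# maps to an HTTP 503. A standalone sketch of that behaviour (NoPoolFound
# here is a local stand-in for zaqar.storage.errors.NoPoolFound):

class NoPoolFound(Exception):
    pass

def lazy_listing():
    raise NoPoolFound()
    yield  # never reached; its presence makes this a generator function

listing = lazy_listing()  # no error yet; generator bodies run lazily
try:
    next(listing)         # the first next() raises, triggering the 503 path
except NoPoolFound:
    print('handler would respond with status 503')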
- -import six - -import falcon -from falcon import testing as ftest -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from zaqar import bootstrap -from zaqar.common import configs -from zaqar import tests as testing -from zaqar.transport import validation -from zaqar.transport.wsgi import driver - - -class TestBase(testing.TestBase): - - config_file = None - - def setUp(self): - super(TestBase, self).setUp() - - if not self.config_file: - self.skipTest("No config specified") - - self.conf.register_opts(configs._GENERAL_OPTIONS) - self.conf.register_opts(validation._TRANSPORT_LIMITS_OPTIONS, - group=validation._TRANSPORT_LIMITS_GROUP) - self.transport_cfg = self.conf[validation._TRANSPORT_LIMITS_GROUP] - - self.conf.register_opts(driver._WSGI_OPTIONS, - group=driver._WSGI_GROUP) - self.wsgi_cfg = self.conf[driver._WSGI_GROUP] - - self.conf.unreliable = True - self.conf.admin_mode = True - self.boot = bootstrap.Bootstrap(self.conf) - self.addCleanup(self.boot.storage.close) - self.addCleanup(self.boot.control.close) - - self.app = self.boot.transport.app - - self.srmock = ftest.StartResponseMock() - - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-ROLES': 'admin', - 'X-USER-ID': 'a12d157c7d0d41999096639078fd11fc', - 'X-TENANT-ID': 'abb69142168841fcaa2785791b92467f', - } - - def tearDown(self): - if self.conf.pooling: - self.boot.control.pools_controller.drop_all() - self.boot.control.catalogue_controller.drop_all() - super(TestBase, self).tearDown() - - def simulate_request(self, path, project_id=None, **kwargs): - """Simulate a request. - - Simulates a WSGI request to the API for testing. - - :param path: Request path for the desired resource - :param project_id: Project ID to use for the X-Project-ID header, - or None to not set the header - :param kwargs: Same as falcon.testing.create_environ() - - :returns: standard WSGI iterable response - """ - - # NOTE(flaper87): We create a copy regardless the headers - # were passed or not. 
This will prevent modifying `self.headers` - # in cases where simulate methods are called like: - # self.simulate_put(path, headers=self.headers) - headers = kwargs.get('headers', self.headers).copy() - project_id = ('518b51ea133c4facadae42c328d6b77b' if project_id - is None else project_id) - if kwargs.get('need_project_id', True): - headers['X-Project-ID'] = headers.get('X-Project-ID', project_id) - kwargs.pop('need_project_id', None) - kwargs['headers'] = headers - try: - if six.PY3: - path.encode('latin1').decode('utf-8', 'replace') - except UnicodeEncodeError: - self.srmock.status = falcon.HTTP_400 - return - - return self.app(ftest.create_environ(path=path, **kwargs), - self.srmock) - - def simulate_get(self, *args, **kwargs): - """Simulate a GET request.""" - kwargs['method'] = 'GET' - return self.simulate_request(*args, **kwargs) - - def simulate_head(self, *args, **kwargs): - """Simulate a HEAD request.""" - kwargs['method'] = 'HEAD' - return self.simulate_request(*args, **kwargs) - - def simulate_put(self, *args, **kwargs): - """Simulate a PUT request.""" - kwargs['method'] = 'PUT' - return self.simulate_request(*args, **kwargs) - - def simulate_post(self, *args, **kwargs): - """Simulate a POST request.""" - kwargs['method'] = 'POST' - return self.simulate_request(*args, **kwargs) - - def simulate_delete(self, *args, **kwargs): - """Simulate a DELETE request.""" - kwargs['method'] = 'DELETE' - return self.simulate_request(*args, **kwargs) - - def simulate_patch(self, *args, **kwargs): - """Simulate a PATCH request.""" - kwargs['method'] = 'PATCH' - return self.simulate_request(*args, **kwargs) - - -class TestBaseFaulty(TestBase): - """This test ensures we aren't letting any exceptions go unhandled.""" - - -class V1Base(TestBase): - """Base class for V1 API Tests. - - Should contain methods specific to V1 of the API - """ - url_prefix = '/v1' - - -class V1BaseFaulty(TestBaseFaulty): - """Base class for V1 API Faulty Tests. - - Should contain methods specific to V1 exception testing - """ - url_prefix = '/v1' - - -class V1_1Base(TestBase): - """Base class for V1.1 API Tests. - - Should contain methods specific to V1.1 of the API - """ - url_prefix = '/v1.1' - - def _empty_message_list(self, body): - self.assertEqual([], jsonutils.loads(body[0])['messages']) - - -class V1_1BaseFaulty(TestBaseFaulty): - """Base class for V1.1 API Faulty Tests. - - Should contain methods specific to V1.1 exception testing - """ - url_prefix = '/v1.1' - - -class V2Base(V1_1Base): - """Base class for V2 API Tests. - - Should contain methods specific to V2 of the API - """ - url_prefix = '/v2' - - -class V2BaseFaulty(V1_1BaseFaulty): - """Base class for V2 API Faulty Tests. - - Should contain methods specific to V2 exception testing - """ - url_prefix = '/v2' diff --git a/zaqar/tests/unit/transport/wsgi/test_utils.py b/zaqar/tests/unit/transport/wsgi/test_utils.py deleted file mode 100644 index 72ebc023..00000000 --- a/zaqar/tests/unit/transport/wsgi/test_utils.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations under -# the License. - -import io -import json - -import falcon -import six -import testtools - -from zaqar.transport.wsgi import utils - - -class TestUtils(testtools.TestCase): - - def test_get_checked_field_missing(self): - doc = {} - - self.assertRaises(falcon.HTTPBadRequest, - utils.get_checked_field, doc, 'openstack', - int, None) - - self.assertRaises(falcon.HTTPBadRequest, - utils.get_checked_field, doc, 42, int, None) - - doc = {'openstac': 10} - - self.assertRaises(falcon.HTTPBadRequest, - utils.get_checked_field, doc, 'openstack', - int, None) - - value = utils.get_checked_field(doc, 'missing', int, 0) - self.assertEqual(0, value) - - value = utils.get_checked_field(doc, 'missing', dict, {}) - self.assertEqual({}, value) - - def test_get_checked_field_bad_type(self): - doc = {'openstack': '10'} - - self.assertRaises(falcon.HTTPBadRequest, - utils.get_checked_field, doc, 'openstack', - int, None) - - doc = {'openstack': 10, 'openstack-mq': 'test'} - - self.assertRaises(falcon.HTTPBadRequest, - utils.get_checked_field, doc, 'openstack', - str, None) - - doc = {'openstack': '[1, 2]'} - - self.assertRaises(falcon.HTTPBadRequest, - utils.get_checked_field, doc, 'openstack', - list, None) - - def test_get_checked_field(self): - doc = {'hello': 'world', 'the answer': 42, 'question': []} - - value = utils.get_checked_field(doc, 'hello', str, None) - self.assertEqual('world', value) - - value = utils.get_checked_field(doc, 'the answer', int, None) - self.assertEqual(42, value) - - value = utils.get_checked_field(doc, 'question', list, None) - self.assertEqual([], value) - - def test_filter_missing(self): - doc = {'body': {'event': 'start_backup'}} - spec = (('tag', dict, None),) - self.assertRaises(falcon.HTTPBadRequest, - utils.filter, doc, spec) - - spec = (('tag', str, 'db'),) - filtered = utils.filter(doc, spec) - self.assertEqual({'tag': 'db'}, filtered) - - def test_filter_bad_type(self): - doc = {'ttl': '300', 'bogus': 'yogabbagabba'} - spec = [('ttl', int, None)] - self.assertRaises(falcon.HTTPBadRequest, - utils.filter, doc, spec) - - def test_filter(self): - doc = {'body': {'event': 'start_backup'}} - - def spec(): - yield ('body', dict, None) - - filtered = utils.filter(doc, spec()) - self.assertEqual(doc, filtered) - - doc = {'ttl': 300, 'bogus': 'yogabbagabba'} - spec = [('ttl', int, None)] - filtered = utils.filter(doc, spec) - self.assertEqual({'ttl': 300}, filtered) - - doc = {'body': {'event': 'start_backup'}, 'ttl': 300} - spec = (('body', dict, None), ('ttl', int, None)) - filtered = utils.filter(doc, spec) - self.assertEqual(doc, filtered) - - def test_no_spec(self): - obj = {u'body': {'event': 'start_backup'}, 'ttl': 300} - document = six.text_type(json.dumps(obj, ensure_ascii=False)) - doc_stream = io.StringIO(document) - - deserialized = utils.deserialize(doc_stream, len(document)) - filtered = utils.sanitize(deserialized, spec=None) - self.assertEqual(obj, filtered) - - # NOTE(kgriffs): Ensure default value for *spec* is None - filtered2 = utils.sanitize(deserialized) - self.assertEqual(filtered, filtered2) - - def test_no_spec_array(self): - things = [{u'body': {'event': 'start_backup'}, 'ttl': 300}] - document = six.text_type(json.dumps(things, ensure_ascii=False)) - doc_stream = io.StringIO(document) - - deserialized = utils.deserialize(doc_stream, len(document)) - filtered = utils.sanitize(deserialized, doctype=utils.JSONArray, - spec=None) - self.assertEqual(things, filtered) - 
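# A sketch of the get_checked_field() contract pinned down by the tests
# above, assuming only the behaviour the assertions require (the real
# implementation lives in zaqar.transport.wsgi.utils): a missing field
# returns the default when one is given, otherwise raises HTTPBadRequest,
# and a present field must be an instance of the expected type.

import falcon

def get_checked_field_sketch(document, name, value_type, default_value):
    try:
        value = document[name]
    except (KeyError, TypeError):
        if default_value is not None:
            return default_value
        description = 'The "{0}" field is required.'.format(name)
        raise falcon.HTTPBadRequest('Missing parameter', description)

    if not isinstance(value, value_type):
        description = 'The "{0}" field has the wrong type.'.format(name)
        raise falcon.HTTPBadRequest('Invalid parameter', description)

    return value

# e.g. get_checked_field_sketch({}, 'missing', int, 0) returns 0, while
# get_checked_field_sketch({'openstack': '10'}, 'openstack', int, None)
# raises falcon.HTTPBadRequest, matching the assertions above.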
- def test_filter_star(self): - doc = {'ttl': 300, 'body': {'event': 'start_backup'}} - - spec = [('body', '*', None), ('ttl', '*', None)] - filtered = utils.filter(doc, spec) - - self.assertEqual(doc, filtered) - - def test_deserialize_and_sanitize_json_obj(self): - obj = {u'body': {'event': 'start_backup'}, 'id': 'DEADBEEF'} - - document = six.text_type(json.dumps(obj, ensure_ascii=False)) - stream = io.StringIO(document) - spec = [('body', dict, None), ('id', six.string_types, None)] - - # Positive test - deserialized_object = utils.deserialize(stream, len(document)) - filtered_object = utils.sanitize(deserialized_object, spec) - self.assertEqual(obj, filtered_object) - - # Negative test - self.assertRaises(falcon.HTTPBadRequest, - utils.sanitize, deserialized_object, spec, - doctype=utils.JSONArray) - - def test_deserialize_and_sanitize_json_array(self): - array = [{u'body': {u'x': 1}}, {u'body': {u'x': 2}}] - - document = six.text_type(json.dumps(array, ensure_ascii=False)) - stream = io.StringIO(document) - spec = [('body', dict, None)] - - # Positive test - deserialized_object = utils.deserialize(stream, len(document)) - filtered_object = utils.sanitize(deserialized_object, spec, - doctype=utils.JSONArray) - self.assertEqual(array, filtered_object) - - # Negative test - self.assertRaises(falcon.HTTPBadRequest, - utils.sanitize, deserialized_object, spec, - doctype=utils.JSONObject) - - def test_bad_doctype(self): - self.assertRaises(TypeError, - utils.sanitize, {}, None, doctype=int) - - def test_deserialize_bad_stream(self): - stream = None - length = None - self.assertRaises(falcon.HTTPBadRequest, - utils.deserialize, stream, length) diff --git a/zaqar/tests/unit/transport/wsgi/test_version.py b/zaqar/tests/unit/transport/wsgi/test_version.py deleted file mode 100644 index 7e56ba77..00000000 --- a/zaqar/tests/unit/transport/wsgi/test_version.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
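# A similar sketch of filter(), whose spec entries are (field, type,
# default) triples per the tests above (again assumed from the assertions;
# the real code is zaqar.transport.wsgi.utils.filter): unrecognized fields
# are dropped, '*' matches any type, and a missing field without a default
# is a client error.

import falcon

def filter_sketch(document, spec):
    filtered = {}
    for name, value_type, default in spec:
        if name in document:
            value = document[name]
            if value_type != '*' and not isinstance(value, value_type):
                raise falcon.HTTPBadRequest(
                    'Invalid parameter',
                    'The "{0}" field has the wrong type.'.format(name))
            filtered[name] = value
        elif default is not None:
            filtered[name] = default
        else:
            raise falcon.HTTPBadRequest(
                'Missing parameter',
                'The "{0}" field is required.'.format(name))
    return filtered

# filter_sketch({'ttl': 300, 'bogus': 'x'}, [('ttl', int, None)])
# -> {'ttl': 300}; the unrecognized 'bogus' key is silently dropped.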
- -import falcon -from oslo_serialization import jsonutils - -from zaqar.tests.unit.transport.wsgi import base - -EXPECTED_VERSIONS = [ - { - 'id': '1', - 'status': 'DEPRECATED', - 'updated': '2014-9-11T17:47:05Z', - 'media-types': [ - { - 'base': 'application/json', - 'type': 'application/vnd.openstack.messaging-v1+json' - } - ], - 'links': [ - { - 'href': '/v1/', - 'rel': 'self' - } - ] - }, - { - 'id': '1.1', - 'status': 'DEPRECATED', - 'updated': '2016-7-29T02:22:47Z', - 'media-types': [ - { - 'base': 'application/json', - 'type': 'application/vnd.openstack.messaging-v1_1+json' - } - ], - 'links': [ - { - 'href': '/v1.1/', - 'rel': 'self' - } - ] - }, - { - 'id': '2', - 'status': 'CURRENT', - 'updated': '2014-9-24T04:06:47Z', - 'media-types': [ - { - 'base': 'application/json', - 'type': 'application/vnd.openstack.messaging-v2+json' - } - ], - 'links': [ - { - 'href': '/v2/', - 'rel': 'self' - } - ] - } -] - - -class TestVersion(base.TestBase): - - config_file = 'wsgi_mongodb.conf' - - def test_get(self): - response = self.simulate_get('/') - versions = jsonutils.loads(response[0])['versions'] - - self.assertEqual(falcon.HTTP_300, self.srmock.status) - self.assertEqual(3, len(versions)) - self.assertEqual(EXPECTED_VERSIONS, versions) diff --git a/zaqar/tests/unit/transport/wsgi/v1/__init__.py b/zaqar/tests/unit/transport/wsgi/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_auth.py b/zaqar/tests/unit/transport/wsgi/v1/test_auth.py deleted file mode 100644 index f566b731..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_auth.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Test Auth.""" - - -import falcon -from falcon import testing -from keystonemiddleware import auth_token -from oslo_utils import uuidutils - -from zaqar.tests.unit.transport.wsgi import base - - -class TestAuth(base.V1Base): - - config_file = 'keystone_auth.conf' - - def setUp(self): - super(TestAuth, self).setUp() - self.headers = {'Client-ID': uuidutils.generate_uuid()} - - def test_auth_install(self): - self.assertIsInstance(self.app._auth_app, auth_token.AuthProtocol) - - def test_non_authenticated(self): - env = testing.create_environ(self.url_prefix + '/480924/queues/', - method='GET', - headers=self.headers) - - self.app(env, self.srmock) - self.assertEqual(falcon.HTTP_401, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_claims.py b/zaqar/tests/unit/transport/wsgi/v1/test_claims.py deleted file mode 100644 index 18cb6b12..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_claims.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -import ddt -import falcon -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -from testtools import matchers - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@ddt.ddt -class TestClaimsMongoDB(base.V1Base): - - config_file = 'wsgi_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestClaimsMongoDB, self).setUp() - - self.project_id = '480924' - self.queue_path = self.url_prefix + '/queues/fizbit' - self.claims_path = self.queue_path + '/claims' - self.messages_path = self.queue_path + '/messages' - - doc = '{"_ttl": 60}' - - self.simulate_put(self.queue_path, self.project_id, body=doc) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - doc = jsonutils.dumps([{'body': 239, 'ttl': 300}] * 10) - self.simulate_post(self.queue_path + '/messages', self.project_id, - body=doc, headers={'Client-ID': - uuidutils.generate_uuid()}) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def tearDown(self): - storage = self.boot.storage._storage - control = self.boot.control - connection = storage.connection - - connection.drop_database(control.queues_database) - - for db in storage.message_databases: - connection.drop_database(db) - self.simulate_delete(self.queue_path, self.project_id) - - super(TestClaimsMongoDB, self).tearDown() - - @ddt.data(None, '[', '[]', '{}', '.', '"fail"') - def test_bad_claim(self, doc): - self.simulate_post(self.claims_path, self.project_id, body=doc) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - href = self._get_a_claim() - - self.simulate_patch(href, self.project_id, body=doc) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_exceeded_claim(self): - self.simulate_post(self.claims_path, self.project_id, - body='{"ttl": 100, "grace": 60}', - query_string='limit=21') - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60)) - def test_unacceptable_ttl_or_grace(self, ttl_grace): - ttl, grace = ttl_grace - self.simulate_post(self.claims_path, self.project_id, - body=jsonutils.dumps({'ttl': ttl, 'grace': grace})) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 59, 43201) - def test_unacceptable_new_ttl(self, ttl): - href = self._get_a_claim() - - self.simulate_patch(href, self.project_id, - body=jsonutils.dumps({'ttl': ttl})) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def _get_a_claim(self): - doc = '{"ttl": 100, "grace": 60}' - self.simulate_post(self.claims_path, self.project_id, body=doc) - return self.srmock.headers_dict['Location'] - - def test_lifecycle(self): - doc = '{"ttl": 100, "grace": 60}' - - # First, claim some messages - body = self.simulate_post(self.claims_path, self.project_id, body=doc) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - claimed = jsonutils.loads(body[0]) - claim_href = self.srmock.headers_dict['Location'] - message_href, params = claimed[0]['href'].split('?') - - # No more messages to claim - 
self.simulate_post(self.claims_path, self.project_id, body=doc, - query_string='limit=3') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - headers = { - 'Client-ID': uuidutils.generate_uuid(), - } - - # Listing messages, by default, won't include claimed - body = self.simulate_get(self.messages_path, self.project_id, - headers=headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Include claimed messages this time - body = self.simulate_get(self.messages_path, self.project_id, - query_string='include_claimed=true', - headers=headers) - listed = jsonutils.loads(body[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(len(claimed), len(listed['messages'])) - - now = timeutils.utcnow() + datetime.timedelta(seconds=10) - timeutils_utcnow = 'oslo_utils.timeutils.utcnow' - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - body = self.simulate_get(claim_href, self.project_id) - - claim = jsonutils.loads(body[0]) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(claim_href, - self.srmock.headers_dict['Content-Location']) - self.assertEqual(100, claim['ttl']) - # NOTE(cpp-cabrera): verify that claim age is non-negative - self.assertThat(claim['age'], matchers.GreaterThan(-1)) - - # Try to delete the message without submitting a claim_id - self.simulate_delete(message_href, self.project_id) - self.assertEqual(falcon.HTTP_403, self.srmock.status) - - # Delete the message and its associated claim - self.simulate_delete(message_href, self.project_id, - query_string=params) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Try to get it from the wrong project - self.simulate_get(message_href, 'bogus_project', query_string=params) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Get the message - self.simulate_get(message_href, self.project_id, query_string=params) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Update the claim - new_claim_ttl = '{"ttl": 60}' - creation = timeutils.utcnow() - self.simulate_patch(claim_href, self.project_id, body=new_claim_ttl) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Get the claimed messages (again) - body = self.simulate_get(claim_href, self.project_id) - query = timeutils.utcnow() - claim = jsonutils.loads(body[0]) - message_href, params = claim['messages'][0]['href'].split('?') - - self.assertEqual(60, claim['ttl']) - estimated_age = timeutils.delta_seconds(creation, query) - self.assertGreater(estimated_age, claim['age']) - - # Delete the claim - self.simulate_delete(claim['href'], 'bad_id') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_delete(claim['href'], self.project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Try to delete a message with an invalid claim ID - self.simulate_delete(message_href, self.project_id, - query_string=params) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Make sure it wasn't deleted! 
- self.simulate_get(message_href, self.project_id, query_string=params) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # Try to get a claim that doesn't exist - self.simulate_get(claim['href']) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Try to update a claim that doesn't exist - self.simulate_patch(claim['href'], body=doc) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_post_claim_nonexistent_queue(self): - path = self.url_prefix + '/queues/nonexistent/claims' - self.simulate_post(path, self.project_id, - body='{"ttl": 100, "grace": 60}') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_get_claim_nonexistent_queue(self): - path = self.url_prefix + '/queues/nonexistent/claims/aaabbbba' - self.simulate_get(path) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # NOTE(cpp-cabrera): regression test against bug #1203842 - def test_get_nonexistent_claim_404s(self): - self.simulate_get(self.claims_path + '/a') - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_delete_nonexistent_claim_204s(self): - self.simulate_delete(self.claims_path + '/a') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_patch_nonexistent_claim_404s(self): - patch_data = jsonutils.dumps({'ttl': 100}) - self.simulate_patch(self.claims_path + '/a', body=patch_data) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - -class TestClaimsFaultyDriver(base.V1BaseFaulty): - - config_file = 'wsgi_faulty.conf' - - def test_simple(self): - project_id = '480924' - claims_path = self.url_prefix + '/queues/fizbit/claims' - doc = '{"ttl": 100, "grace": 60}' - - self.simulate_post(claims_path, project_id, body=doc) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_get(claims_path + '/nichts', project_id) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_patch(claims_path + '/nichts', project_id, body=doc) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_delete(claims_path + '/foo', project_id) - self.assertEqual(falcon.HTTP_503, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_default_limits.py b/zaqar/tests/unit/transport/wsgi/v1/test_default_limits.py deleted file mode 100644 index e47cb313..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_default_limits.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
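# Taken together, the claim tests above pin the accepted window for both
# ttl and grace to [60, 43200] seconds: the parametrized 400 cases sit just
# outside those bounds. A toy restatement of that rule; the authoritative
# limits live in zaqar.transport.validation and its configuration options.

MIN_CLAIM_TTL = 60       # seconds; 59 is rejected above
MAX_CLAIM_TTL = 43200    # 12 hours; 43201 is rejected above

def claim_window_ok(ttl, grace):
    return (MIN_CLAIM_TTL <= ttl <= MAX_CLAIM_TTL and
            MIN_CLAIM_TTL <= grace <= MAX_CLAIM_TTL)

assert claim_window_ok(100, 60)
assert not claim_window_ok(59, 60)
assert not claim_window_ok(60, 43201)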
- -import contextlib - -import falcon -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from zaqar import storage -from zaqar.tests.unit.transport.wsgi import base - - -class TestDefaultLimits(base.V1Base): - - config_file = 'wsgi_mongodb_default_limits.conf' - - def setUp(self): - super(TestDefaultLimits, self).setUp() - - self.queue_path = self.url_prefix + '/queues' - self.q1_queue_path = self.queue_path + '/' + uuidutils.generate_uuid() - self.messages_path = self.q1_queue_path + '/messages' - self.claims_path = self.q1_queue_path + '/claims' - - self.simulate_put(self.q1_queue_path) - - def tearDown(self): - self.simulate_delete(self.queue_path) - super(TestDefaultLimits, self).tearDown() - - def test_queue_listing(self): - # 2 queues to list - self.addCleanup(self.simulate_delete, self.queue_path + '/q2') - self.simulate_put(self.queue_path + '/q2') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - with self._prepare_queues(storage.DEFAULT_QUEUES_PER_PAGE + 1): - result = self.simulate_get(self.queue_path) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - queues = jsonutils.loads(result[0])['queues'] - self.assertEqual(storage.DEFAULT_QUEUES_PER_PAGE, len(queues)) - - def test_message_listing(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1) - - result = self.simulate_get(self.messages_path, - headers={'Client-ID': - uuidutils.generate_uuid()}) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - messages = jsonutils.loads(result[0])['messages'] - self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages)) - - def test_claim_creation(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_CLAIM + 1) - - result = self.simulate_post(self.claims_path, - body='{"ttl": 60, "grace": 60}') - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - messages = jsonutils.loads(result[0]) - self.assertEqual(storage.DEFAULT_MESSAGES_PER_CLAIM, len(messages)) - - @contextlib.contextmanager - def _prepare_queues(self, count): - queue_paths = [self.queue_path + '/multi-{0}'.format(i) - for i in range(count)] - - for path in queue_paths: - self.simulate_put(path) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - yield - - for path in queue_paths: - self.simulate_delete(path) - - def _prepare_messages(self, count): - doc = jsonutils.dumps([{'body': 239, 'ttl': 300}] * count) - self.simulate_post(self.messages_path, body=doc, - headers={'Client-ID': uuidutils.generate_uuid()}) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_health.py b/zaqar/tests/unit/transport/wsgi/v1/test_health.py deleted file mode 100644 index 5a61a00c..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_health.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
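# _prepare_queues() above wraps fixture setup in a context manager so the
# listing assertions run while the extra queues exist and cleanup happens on
# exit. The same shape in isolation, with try/finally added so cleanup also
# runs when an assertion fails mid-block (the names here are illustrative,
# not taken from the retired tree):

import contextlib

@contextlib.contextmanager
def temporary_items(create, delete, count):
    names = ['multi-{0}'.format(i) for i in range(count)]
    for name in names:
        create(name)
    try:
        yield names
    finally:
        for name in names:
            delete(name)

# with temporary_items(put_queue, delete_queue, 11) as names:
#     ...listing is still capped at DEFAULT_QUEUES_PER_PAGE...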
- -import falcon - -from zaqar.tests.unit.transport.wsgi import base - - -class TestHealth(base.V1Base): - - config_file = 'wsgi_mongodb.conf' - - def test_get(self): - response = self.simulate_get('/v1/health') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - self.assertEqual([], response) - - def test_head(self): - response = self.simulate_head('/v1/health') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - self.assertEqual([], response) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_home.py b/zaqar/tests/unit/transport/wsgi/v1/test_home.py deleted file mode 100644 index f40c828b..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_home.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import falcon -from oslo_serialization import jsonutils -import six.moves.urllib.parse as urlparse - -from zaqar.tests.unit.transport.wsgi import base - - -class TestHomeDocument(base.V1Base): - - config_file = 'wsgi_mongodb.conf' - - def test_json_response(self): - body = self.simulate_get(self.url_prefix) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - content_type = self.srmock.headers_dict['Content-Type'] - self.assertEqual('application/json-home', content_type) - - try: - jsonutils.loads(body[0]) - except ValueError: - self.fail('Home document is not valid JSON') - - def test_href_template(self): - body = self.simulate_get(self.url_prefix) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - resp = jsonutils.loads(body[0]) - queue_href_template = resp['resources']['rel/queue']['href-template'] - path_1 = 'https://zaqar.example.com' + self.url_prefix - path_2 = 'https://zaqar.example.com' + self.url_prefix + '/' - - # Verify all the href template start with the correct version prefix - for resource in list(resp['resources']): - self.assertTrue(resp['resources'][resource]['href-template']. - startswith(self.url_prefix)) - - url = urlparse.urljoin(path_1, queue_href_template) - expected = ('https://zaqar.example.com' + self.url_prefix + - '/queues/foo') - self.assertEqual(expected, url.format(queue_name='foo')) - - url = urlparse.urljoin(path_2, queue_href_template) - self.assertEqual(expected, url.format(queue_name='foo')) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_media_type.py b/zaqar/tests/unit/transport/wsgi/v1/test_media_type.py deleted file mode 100644 index 387d73c3..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_media_type.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import falcon -from falcon import testing -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - - -from zaqar.tests.unit.transport.wsgi import base - - -class TestMediaType(base.V1Base): - - config_file = 'wsgi_mongodb.conf' - - def test_json_only_endpoints_with_wrong_accept_header(self): - endpoints = ( - ('GET', self.url_prefix + '/queues'), - ('GET', self.url_prefix + '/queues/nonexistent/metadata'), - ('GET', self.url_prefix + '/queues/nonexistent/stats'), - ('POST', self.url_prefix + '/queues/nonexistent/messages'), - ('GET', self.url_prefix + '/queues/nonexistent/messages/deadbeaf'), - ('POST', self.url_prefix + '/queues/nonexistent/claims'), - ('GET', self.url_prefix + '/queues/nonexistent/claims/0ad'), - ('GET', self.url_prefix + '/health'), - ) - - for method, endpoint in endpoints: - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'Accept': 'application/xml', - } - - env = testing.create_environ(endpoint, - method=method, - headers=headers) - - self.app(env, self.srmock) - self.assertEqual(falcon.HTTP_406, self.srmock.status) - - def test_request_with_body_and_urlencoded_contenttype_header_fails(self): - # NOTE(Eva-i): this test case makes sure wsgi 'before' hook - # "require_content_type_be_non_urlencoded" works to prevent - # bug/1547100. - eww_queue_path = self.url_prefix + '/queues/eww' - eww_queue_messages_path = eww_queue_path + '/messages' - sample_message = jsonutils.dumps([{'body': {'eww!'}, 'ttl': 200}]) - bad_headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'Content-Type': 'application/x-www-form-urlencoded', - } - - # Create queue request with bad headers. Should still work, because it - # has no body. - self.simulate_put(eww_queue_path, headers=bad_headers) - self.addCleanup(self.simulate_delete, eww_queue_path, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Post message request with good headers. Should work. - self.simulate_post(eww_queue_messages_path, body=sample_message, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Post message request with bad headers. Should not work. - self.simulate_post(eww_queue_messages_path, body=sample_message, - headers=bad_headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_messages.py b/zaqar/tests/unit/transport/wsgi/v1/test_messages.py deleted file mode 100644 index 37b48a6f..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_messages.py +++ /dev/null @@ -1,509 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
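# The media-type tests above expect HTTP 406 whenever the Accept header asks
# for anything but JSON. A self-contained falcon sketch of that negotiation
# (illustrative only; Zaqar enforces this through shared request hooks
# rather than per-resource code like this):

import falcon
from falcon import testing

class JSONOnlyResource(object):
    def on_get(self, req, resp):
        accept = req.get_header('Accept') or 'application/json'
        if 'application/json' not in accept and '*/*' not in accept:
            raise falcon.HTTPNotAcceptable('This API speaks JSON only.')
        resp.body = '{}'

app = falcon.API()
app.add_route('/health', JSONOnlyResource())

srmock = testing.StartResponseMock()
env = testing.create_environ('/health', method='GET',
                             headers={'Accept': 'application/xml'})
app(env, srmock)
assert srmock.status == falcon.HTTP_406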
- -import datetime - -import ddt -import falcon -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from testtools import matchers - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base -from zaqar.transport import validation - - -@ddt.ddt -class TestMessagesMongoDB(base.V1Base): - - config_file = 'wsgi_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestMessagesMongoDB, self).setUp() - if self.conf.pooling: - for i in range(4): - uri = "%s/%s" % (self.mongodb_url, str(i)) - doc = {'weight': 100, 'uri': uri} - self.simulate_put(self.url_prefix + '/pools/' + str(i), - body=jsonutils.dumps(doc)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - self.project_id = '7e55e1a7e' - - # TODO(kgriffs): Add support in self.simulate_* for a "base path" - # so that we don't have to concatenate against self.url_prefix - # all over the place. - self.queue_path = self.url_prefix + '/queues/fizbit' - self.messages_path = self.queue_path + '/messages' - - doc = '{"_ttl": 60}' - self.simulate_put(self.queue_path, self.project_id, body=doc) - - # NOTE(kgriffs): Also register without a project for tests - # that do not specify a project. - # - # TODO(kgriffs): Should a project id always be required or - # automatically supplied in the simulate_* methods? - self.simulate_put(self.queue_path, body=doc) - - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - } - - def tearDown(self): - self.simulate_delete(self.queue_path, self.project_id) - if self.conf.pooling: - for i in range(4): - self.simulate_delete(self.url_prefix + '/pools/' + str(i)) - - super(TestMessagesMongoDB, self).tearDown() - - def _test_post(self, sample_messages): - sample_doc = jsonutils.dumps(sample_messages) - - result = self.simulate_post(self.messages_path, self.project_id, - body=sample_doc, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - result_doc = jsonutils.loads(result[0]) - - msg_ids = self._get_msg_ids(self.srmock.headers_dict) - self.assertEqual(len(sample_messages), len(msg_ids)) - - expected_resources = [six.text_type(self.messages_path + '/' + id) - for id in msg_ids] - self.assertEqual(expected_resources, result_doc['resources']) - - # NOTE(kgriffs): As of the Icehouse release, drivers are - # required to either completely succeed, or completely fail - # to enqueue the entire batch of messages. 
- self.assertFalse(result_doc['partial']) - - self.assertEqual(len(sample_messages), len(msg_ids)) - - lookup = dict([(m['ttl'], m['body']) for m in sample_messages]) - - # Test GET on the message resource directly - # NOTE(cpp-cabrera): force the passing of time to age a message - timeutils_utcnow = 'oslo_utils.timeutils.utcnow' - now = timeutils.utcnow() + datetime.timedelta(seconds=10) - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - for msg_id in msg_ids: - message_uri = self.messages_path + '/' + msg_id - - # Wrong project ID - self.simulate_get(message_uri, '777777') - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Correct project ID - result = self.simulate_get(message_uri, self.project_id) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(message_uri, - self.srmock.headers_dict['Content-Location']) - - # Check message properties - message = jsonutils.loads(result[0]) - self.assertEqual(message_uri, message['href']) - self.assertEqual(lookup[message['ttl']], message['body']) - - # no negative age - # NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26 - self.assertThat(message['age'], - matchers.GreaterThan(-1)) - - # Test bulk GET - query_string = 'ids=' + ','.join(msg_ids) - result = self.simulate_get(self.messages_path, self.project_id, - query_string=query_string) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - result_doc = jsonutils.loads(result[0]) - expected_ttls = set(m['ttl'] for m in sample_messages) - actual_ttls = set(m['ttl'] for m in result_doc) - self.assertFalse(expected_ttls - actual_ttls) - - def test_exceeded_payloads(self): - # Get a valid message id - self._post_messages(self.messages_path) - msg_id = self._get_msg_id(self.srmock.headers_dict) - - # Bulk GET restriction - query_string = 'ids=' + ','.join([msg_id] * 21) - self.simulate_get(self.messages_path, self.project_id, - query_string=query_string) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Listing restriction - self.simulate_get(self.messages_path, self.project_id, - query_string='limit=21', - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Bulk deletion restriction - query_string = 'ids=' + ','.join([msg_id] * 22) - self.simulate_delete(self.messages_path, self.project_id, - query_string=query_string) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_post_single(self): - sample_messages = [ - {'body': {'key': 'value'}, 'ttl': 200}, - ] - - self._test_post(sample_messages) - - def test_post_multiple(self): - sample_messages = [ - {'body': 239, 'ttl': 100}, - {'body': {'key': 'value'}, 'ttl': 200}, - {'body': [1, 3], 'ttl': 300}, - ] - - self._test_post(sample_messages) - - def test_post_to_non_ascii_queue(self): - # NOTE(kgriffs): This test verifies that routes with - # embedded queue name params go through the validation - # hook, regardless of the target resource. - - path = self.url_prefix + u'/queues/non-ascii-n\u0153me/messages' - - if six.PY2: - path = path.encode('utf-8') - - self._post_messages(path) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_post_with_long_queue_name(self): - # NOTE(kgriffs): This test verifies that routes with - # embedded queue name params go through the validation - # hook, regardless of the target resource. 
- - queues_path = self.url_prefix + '/queues/' - - game_title = 'v' * validation.QUEUE_NAME_MAX_LEN - self._post_messages(queues_path + game_title + '/messages') - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - game_title += 'v' - self._post_messages(queues_path + game_title + '/messages') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_post_to_missing_queue(self): - self._post_messages(self.url_prefix + '/queues/nonexistent/messages') - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_get_from_missing_queue(self): - self.simulate_get(self.url_prefix + '/queues/nonexistent/messages', - self.project_id, - headers={'Client-ID': - 'dfcd3238-425c-11e3-8a80-28cfe91478b9'}) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - @ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369') - def test_bad_client_id(self, text_id): - self.simulate_post(self.queue_path + '/messages', - body='{"ttl": 60, "body": ""}', - headers={'Client-ID': text_id}) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_get(self.queue_path + '/messages', - query_string='limit=3&echo=true', - headers={'Client-ID': text_id}) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(None, '[', '[]', '{}', '.') - def test_post_bad_message(self, document): - self.simulate_post(self.queue_path + '/messages', - body=document, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 59, 1209601) - def test_unacceptable_ttl(self, ttl): - self.simulate_post(self.queue_path + '/messages', - body=jsonutils.dumps([{'ttl': ttl, 'body': None}]), - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_exceeded_message_posting(self): - # Total (raw request) size - doc = jsonutils.dumps([{'body': "some body", 'ttl': 100}] * 20, - indent=4) - - max_len = self.transport_cfg.max_messages_post_size - long_doc = doc + (' ' * (max_len - len(doc) + 1)) - - self.simulate_post(self.queue_path + '/messages', - body=long_doc, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data('{"overflow": 9223372036854775808}', - '{"underflow": -9223372036854775809}') - def test_unsupported_json(self, document): - self.simulate_post(self.queue_path + '/messages', - body=document, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_delete(self): - self._post_messages(self.messages_path) - msg_id = self._get_msg_id(self.srmock.headers_dict) - target = self.messages_path + '/' + msg_id - - self.simulate_get(target, self.project_id) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - self.simulate_delete(target, self.project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_get(target, self.project_id) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Safe to delete non-existing ones - self.simulate_delete(target, self.project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_bulk_delete(self): - path = self.queue_path + '/messages' - self._post_messages(path, repeat=5) - [target, params] = self.srmock.headers_dict['location'].split('?') - - # Deleting the whole collection is denied - self.simulate_delete(path, self.project_id) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_delete(target, self.project_id, query_string=params) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - 
self.simulate_get(target, self.project_id, query_string=params) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Safe to delete non-existing ones - self.simulate_delete(target, self.project_id, query_string=params) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Even after the queue is gone - self.simulate_delete(self.queue_path, self.project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_delete(target, self.project_id, query_string=params) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_list(self): - path = self.queue_path + '/messages' - self._post_messages(path, repeat=10) - - query_string = 'limit=3&echo=true' - body = self.simulate_get(path, self.project_id, - query_string=query_string, - headers=self.headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(path + '?' + query_string, - self.srmock.headers_dict['Content-Location']) - - cnt = 0 - while self.srmock.status == falcon.HTTP_200: - contents = jsonutils.loads(body[0]) - [target, params] = contents['links'][0]['href'].split('?') - - for msg in contents['messages']: - self.simulate_get(msg['href'], self.project_id) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - body = self.simulate_get(target, self.project_id, - query_string=params, - headers=self.headers) - cnt += 1 - - self.assertEqual(4, cnt) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Stats - body = self.simulate_get(self.queue_path + '/stats', self.project_id) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - message_stats = jsonutils.loads(body[0])['messages'] - self.assertEqual(self.queue_path + '/stats', - self.srmock.headers_dict['Content-Location']) - - # NOTE(kgriffs): The other parts of the stats are tested - # in tests.storage.base and so are not repeated here. 
- expected_pattern = self.queue_path + '/messages/[^/]+$' - for message_stat_name in ('oldest', 'newest'): - self.assertThat(message_stats[message_stat_name]['href'], - matchers.MatchesRegex(expected_pattern)) - - # NOTE(kgriffs): Try to get messages for a missing queue - self.simulate_get(self.url_prefix + '/queues/nonexistent/messages', - self.project_id, - headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_list_with_bad_marker(self): - path = self.queue_path + '/messages' - self._post_messages(path, repeat=5) - - query_string = 'limit=3&echo=true&marker=sfhlsfdjh2048' - self.simulate_get(path, self.project_id, - query_string=query_string, - headers=self.headers) - - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_no_uuid(self): - path = self.queue_path + '/messages' - - self.simulate_post(path, '7e7e7e', - headers={}, - body='[{"body": 0, "ttl": 100}]') - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_get(path, '7e7e7e', headers={}) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # NOTE(cpp-cabrera): regression test against bug #1210633 - def test_when_claim_deleted_then_messages_unclaimed(self): - path = self.queue_path - self._post_messages(path + '/messages', repeat=5) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # post claim - self.simulate_post(path + '/claims', self.project_id, - body='{"ttl": 100, "grace": 100}') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - location = self.srmock.headers_dict['location'] - - # release claim - self.simulate_delete(location, self.project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # get unclaimed messages - self.simulate_get(path + '/messages', self.project_id, - query_string='echo=true', - headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # NOTE(cpp-cabrera): regression test against bug #1203842 - def test_get_nonexistent_message_404s(self): - path = self.url_prefix + '/queues/notthere/messages/a' - self.simulate_get(path) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_get_multiple_invalid_messages_204s(self): - path = self.url_prefix + '/queues/notthere/messages' - self.simulate_get(path, query_string='ids=a,b,c') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_delete_multiple_invalid_messages_204s(self): - path = self.url_prefix + '/queues/notthere/messages' - self.simulate_delete(path, query_string='ids=a,b,c') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_delete_message_with_invalid_claim_doesnt_delete_message(self): - path = self.queue_path - resp = self._post_messages(path + '/messages', 1) - location = jsonutils.loads(resp[0])['resources'][0] - - self.simulate_delete(location, self.project_id, - query_string='claim_id=invalid') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_get(location, self.project_id) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - def test_no_duplicated_messages_path_in_href(self): - """Test for bug 1240897.""" - - path = self.queue_path + '/messages' - self._post_messages(path, repeat=1) - - msg_id = self._get_msg_id(self.srmock.headers_dict) - - query_string = 'ids=%s' % msg_id - body = self.simulate_get(path, self.project_id, - query_string=query_string, - headers=self.headers) - messages = jsonutils.loads(body[0]) - - self.assertNotIn(self.queue_path + '/messages/messages', - messages[0]['href']) - - def _post_messages(self, target, 
repeat=1): - doc = jsonutils.dumps([{'body': 239, 'ttl': 300}] * repeat) - return self.simulate_post(target, self.project_id, body=doc, - headers=self.headers) - - def _get_msg_id(self, headers): - return self._get_msg_ids(headers)[0] - - def _get_msg_ids(self, headers): - return headers['location'].rsplit('=', 1)[-1].split(',') - - -class TestMessagesMongoDBPooled(TestMessagesMongoDB): - - config_file = 'wsgi_mongodb_pooled.conf' - - # TODO(cpp-cabrera): remove this skipTest once pooled queue - # listing is implemented - def test_list(self): - self.skipTest("Need to implement pooled queue listing.") - - -class TestMessagesFaultyDriver(base.V1BaseFaulty): - - config_file = 'wsgi_faulty.conf' - - def test_simple(self): - project_id = 'xyz' - path = self.url_prefix + '/queues/fizbit/messages' - doc = '[{"body": 239, "ttl": 100}]' - headers = { - 'Client-ID': uuidutils.generate_uuid(), - } - - self.simulate_post(path, project_id, - body=doc, - headers=headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_get(path, project_id, - headers=headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_get(path + '/nonexistent', project_id) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_delete(path + '/nada', project_id) - self.assertEqual(falcon.HTTP_503, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_pools.py b/zaqar/tests/unit/transport/wsgi/v1/test_pools.py deleted file mode 100644 index 3e61c545..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_pools.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import contextlib - -import ddt -import falcon -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@contextlib.contextmanager -def pool(test, name, weight, uri, options={}): - """A context manager for constructing a pool for use in testing. - - Deletes the pool after exiting the context. - - :param test: Must expose simulate_* methods - :param name: Name for this pool - :type name: six.text_type - :type weight: int - :type uri: six.text_type - :type options: dict - :returns: (name, weight, uri, options) - :rtype: see above - """ - uri = "%s/%s" % (uri, uuidutils.generate_uuid()) - doc = {'weight': weight, 'uri': uri, 'options': options} - path = test.url_prefix + '/pools/' + name - - test.simulate_put(path, body=jsonutils.dumps(doc)) - - try: - yield name, weight, uri, options - - finally: - test.simulate_delete(path) - - -@contextlib.contextmanager -def pools(test, count, uri): - """A context manager for constructing pools for use in testing. - - Deletes the pools after exiting the context. 
- - :param test: Must expose simulate_* methods - :param count: Number of pools to create - :type count: int - :returns: (paths, weights, uris, options) - :rtype: ([six.text_type], [int], [six.text_type], [dict]) - """ - mongo_url = uri - base = test.url_prefix + '/pools/' - args = [(base + str(i), i, - {str(i): i}) - for i in range(count)] - for path, weight, option in args: - uri = "%s/%s" % (mongo_url, uuidutils.generate_uuid()) - doc = {'weight': weight, 'uri': uri, 'options': option} - test.simulate_put(path, body=jsonutils.dumps(doc)) - - try: - yield args - finally: - for path, _, _ in args: - test.simulate_delete(path) - - -@ddt.ddt -class TestPoolsMongoDB(base.V1Base): - - config_file = 'wsgi_mongodb_pooled.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestPoolsMongoDB, self).setUp() - self.doc = {'weight': 100, 'uri': self.mongodb_url} - self.pool = self.url_prefix + '/pools/' + uuidutils.generate_uuid() - self.simulate_put(self.pool, body=jsonutils.dumps(self.doc)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def tearDown(self): - super(TestPoolsMongoDB, self).tearDown() - self.simulate_delete(self.pool) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_put_pool_works(self): - name = uuidutils.generate_uuid() - weight, uri = self.doc['weight'], self.doc['uri'] - with pool(self, name, weight, uri): - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def test_put_raises_if_missing_fields(self): - path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() - self.simulate_put(path, body=jsonutils.dumps({'weight': 100})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_put(path, - body=jsonutils.dumps( - {'uri': self.mongodb_url})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 2**32+1, 'big') - def test_put_raises_if_invalid_weight(self, weight): - path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() - doc = {'weight': weight, 'uri': 'a'} - self.simulate_put(path, - body=jsonutils.dumps(doc)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 2**32+1, [], 'localhost:27017') - def test_put_raises_if_invalid_uri(self, uri): - path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() - self.simulate_put(path, - body=jsonutils.dumps({'weight': 1, 'uri': uri})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 'wee', []) - def test_put_raises_if_invalid_options(self, options): - path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() - doc = {'weight': 1, 'uri': 'a', 'options': options} - self.simulate_put(path, body=jsonutils.dumps(doc)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_put_existing_overwrites(self): - # NOTE(cabrera): setUp creates default pool - expect = self.doc - self.simulate_put(self.pool, - body=jsonutils.dumps(expect)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - result = self.simulate_get(self.pool) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - doc = jsonutils.loads(result[0]) - self.assertEqual(expect['weight'], doc['weight']) - self.assertEqual(expect['uri'], doc['uri']) - - def test_delete_works(self): - self.simulate_delete(self.pool) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_get(self.pool) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_get_nonexisting_raises_404(self): - self.simulate_get(self.url_prefix + '/pools/nonexisting') - self.assertEqual(falcon.HTTP_404, 
self.srmock.status)
-
-    def _pool_expect(self, pool, xhref, xweight, xuri):
-        self.assertIn('href', pool)
-        self.assertIn('name', pool)
-        self.assertEqual(xhref, pool['href'])
-        self.assertIn('weight', pool)
-        self.assertEqual(xweight, pool['weight'])
-        self.assertIn('uri', pool)
-
-        # NOTE(dynarro): we are using startswith because we are appending
-        # UUIDs to pool URIs, to avoid duplications
-        self.assertTrue(pool['uri'].startswith(xuri))
-
-    def test_get_works(self):
-        result = self.simulate_get(self.pool)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, self.doc['weight'],
-                          self.doc['uri'])
-
-    def test_detailed_get_works(self):
-        result = self.simulate_get(self.pool,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, self.doc['weight'],
-                          self.doc['uri'])
-        self.assertIn('options', pool)
-        self.assertEqual({}, pool['options'])
-
-    def test_patch_raises_if_missing_fields(self):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'location': 1}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def _patch_test(self, doc):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        result = self.simulate_get(self.pool,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, doc['weight'],
-                          doc['uri'])
-        self.assertEqual(doc['options'], pool['options'])
-
-    def test_patch_works(self):
-        doc = {'weight': 101,
-               'uri': self.mongodb_url,
-               'options': {'a': 1}}
-        self._patch_test(doc)
-
-    def test_patch_works_with_extra_fields(self):
-        doc = {'weight': 101,
-               'uri': self.mongodb_url,
-               'options': {'a': 1},
-               'location': 100, 'partition': 'taco'}
-        self._patch_test(doc)
-
-    @ddt.data(-1, 2**32+1, 'big')
-    def test_patch_raises_400_on_invalid_weight(self, weight):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'weight': weight}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 2**32+1, [], 'localhost:27017')
-    def test_patch_raises_400_on_invalid_uri(self, uri):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'uri': uri}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 'wee', [])
-    def test_patch_raises_400_on_invalid_options(self, options):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'options': options}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_patch_raises_404_if_pool_not_found(self):
-        self.simulate_patch(self.url_prefix + '/pools/notexists',
-                            body=jsonutils.dumps({'weight': 1}))
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_empty_listing(self):
-        self.simulate_delete(self.pool)
-        result = self.simulate_get(self.url_prefix + '/pools')
-        results = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self.assertEqual(0, len(results['pools']))
-        self.assertIn('links', results)
-
-    def _listing_test(self, count=10, limit=10,
-                      marker=None, detailed=False):
-        # NOTE(cpp-cabrera): delete initial pool - it will interfere
-        # with listing tests
-        self.simulate_delete(self.pool)
-        query = 'limit={0}&detailed={1}'.format(limit, detailed)
-        if marker:
-            query += '&marker={0}'.format(marker)
-
-        with pools(self, count, self.doc['uri']) as expected:
-            result = self.simulate_get(self.url_prefix + '/pools',
-                                       query_string=query)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            results = jsonutils.loads(result[0])
-            self.assertIsInstance(results, dict)
-            self.assertIn('pools', results)
-            self.assertIn('links', results)
-            pool_list = results['pools']
-
-            link = results['links'][0]
-            self.assertEqual('next', link['rel'])
-            href = falcon.uri.parse_query_string(link['href'].split('?')[1])
-            self.assertIn('marker', href)
-            self.assertEqual(str(limit), href['limit'])
-            self.assertEqual(str(detailed).lower(), href['detailed'])
-
-            next_query_string = ('marker={marker}&limit={limit}'
-                                 '&detailed={detailed}').format(**href)
-            next_result = self.simulate_get(link['href'].split('?')[0],
-                                            query_string=next_query_string)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-            next_pool = jsonutils.loads(next_result[0])
-            next_pool_list = next_pool['pools']
-
-            self.assertIn('links', next_pool)
-            if limit < count:
-                self.assertEqual(min(limit, count-limit),
-                                 len(next_pool_list))
-            else:
-                # NOTE(jeffrey4l): when limit >= count, there will be no
-                # pools in the 2nd page.
-                self.assertEqual(0, len(next_pool_list))
-
-            self.assertEqual(min(limit, count), len(pool_list))
-            for s in pool_list + next_pool_list:
-                # NOTE(flwang): It can't be assumed that sqlalchemy and
-                # mongodb return query results in the same order, i.e. the
-                # order in which the pools were inserted; sqlalchemy in
-                # particular can't guarantee that. So we leverage the
-                # relationship between pool weight and the index in the
-                # pools fixture to pick the right pool to verify.
-                expect = expected[s['weight']]
-                path, weight = expect[:2]
-                self._pool_expect(s, path, weight, self.doc['uri'])
-                if detailed:
-                    self.assertIn('options', s)
-                    self.assertEqual(expect[-1], s['options'])
-                else:
-                    self.assertNotIn('options', s)
-
-    def test_listing_works(self):
-        self._listing_test()
-
-    def test_detailed_listing_works(self):
-        self._listing_test(detailed=True)
-
-    @ddt.data(1, 5, 10, 15)
-    def test_listing_works_with_limit(self, limit):
-        self._listing_test(count=15, limit=limit)
-
-    def test_listing_marker_is_respected(self):
-        self.simulate_delete(self.pool)
-
-        with pools(self, 10, self.doc['uri']) as expected:
-            result = self.simulate_get(self.url_prefix + '/pools',
-                                       query_string='marker=3')
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            pool_list = jsonutils.loads(result[0])['pools']
-            self.assertEqual(6, len(pool_list))
-            path, weight = expected[4][:2]
-            self._pool_expect(pool_list[0], path, weight, self.doc['uri'])
diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_queue_lifecycle.py b/zaqar/tests/unit/transport/wsgi/v1/test_queue_lifecycle.py
deleted file mode 100644
index 4bd59d4b..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1/test_queue_lifecycle.py
+++ /dev/null
@@ -1,401 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
- -import ddt -import falcon -import mock -from oslo_serialization import jsonutils -import six - -from zaqar.storage import errors as storage_errors -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@ddt.ddt -class TestQueueLifecycleMongoDB(base.V1Base): - - config_file = 'wsgi_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestQueueLifecycleMongoDB, self).setUp() - - self.queue_path = self.url_prefix + '/queues' - self.gumshoe_queue_path = self.queue_path + '/gumshoe' - self.fizbat_queue_path = self.queue_path + '/fizbat' - self.fizbat_queue_path_metadata = self.fizbat_queue_path + '/metadata' - - def tearDown(self): - storage = self.boot.storage._storage - connection = storage.connection - - connection.drop_database(self.boot.control.queues_database) - - for db in storage.message_databases: - connection.drop_database(db) - - super(TestQueueLifecycleMongoDB, self).tearDown() - - def test_empty_project_id(self): - self.simulate_get(self.gumshoe_queue_path, '') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_put(self.gumshoe_queue_path, '') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_head(self.gumshoe_queue_path, '') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_delete(self.gumshoe_queue_path, '') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data('480924', 'foo', None) - def test_basics_thoroughly(self, project_id): - gumshoe_queue_path_metadata = self.gumshoe_queue_path + '/metadata' - gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats' - - # Stats not found - queue not created yet - self.simulate_get(gumshoe_queue_path_stats, project_id) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Metadata not found - queue not created yet - self.simulate_get(gumshoe_queue_path_metadata, project_id) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Create - self.simulate_put(self.gumshoe_queue_path, project_id) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - location = self.srmock.headers_dict['Location'] - self.assertEqual(self.gumshoe_queue_path, location) - - # Ensure queue existence - self.simulate_head(self.gumshoe_queue_path, project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Add metadata - doc = '{"messages": {"ttl": 600}}' - self.simulate_put(gumshoe_queue_path_metadata, - project_id, body=doc) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Fetch metadata - result = self.simulate_get(gumshoe_queue_path_metadata, - project_id) - result_doc = jsonutils.loads(result[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(jsonutils.loads(doc), result_doc) - - # Stats empty queue - self.simulate_get(gumshoe_queue_path_stats, project_id) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # Delete - self.simulate_delete(self.gumshoe_queue_path, project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Get non-existent queue - self.simulate_get(self.gumshoe_queue_path, project_id) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Get non-existent stats - self.simulate_get(gumshoe_queue_path_stats, project_id) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Get non-existent metadata - self.simulate_get(gumshoe_queue_path_metadata, project_id) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_name_restrictions(self): - self.simulate_put(self.queue_path + 
'/Nice-Boat_2') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - self.simulate_put(self.queue_path + '/Nice-Bo@t') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_put(self.queue_path + '/_' + 'niceboat' * 8) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_project_id_restriction(self): - muvluv_queue_path = self.queue_path + '/Muv-Luv' - - self.simulate_put(muvluv_queue_path, - headers={'X-Project-ID': 'JAM Project' * 24}) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # no charset restrictions - self.simulate_put(muvluv_queue_path, - headers={'X-Project-ID': 'JAM Project'}) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def test_non_ascii_name(self): - test_params = ((u'/queues/non-ascii-n\u0153me', 'utf-8'), - (u'/queues/non-ascii-n\xc4me', 'iso8859-1')) - - for uri, enc in test_params: - uri = self.url_prefix + uri - - if six.PY2: - uri = uri.encode(enc) - - self.simulate_put(uri) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_get(uri) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_delete(uri) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_no_metadata(self): - self.simulate_put(self.fizbat_queue_path) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - self.simulate_put(self.fizbat_queue_path_metadata) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_put(self.fizbat_queue_path_metadata, body='') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data('{', '[]', '.', ' ', '') - def test_bad_metadata(self, document): - self.simulate_put(self.fizbat_queue_path, '7e55e1a7e') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - self.simulate_put(self.fizbat_queue_path_metadata, '7e55e1a7e', - body=document) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_too_much_metadata(self): - self.simulate_put(self.fizbat_queue_path, '7e55e1a7e') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' - - max_size = self.transport_cfg.max_queue_metadata - padding_len = max_size - (len(doc) - 10) + 1 - - doc = doc.format(pad='x' * padding_len) - - self.simulate_put(self.fizbat_queue_path_metadata, '7e55e1a7e', - body=doc) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_way_too_much_metadata(self): - self.simulate_put(self.fizbat_queue_path, '7e55e1a7e') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' - - max_size = self.transport_cfg.max_queue_metadata - padding_len = max_size * 100 - - doc = doc.format(pad='x' * padding_len) - - self.simulate_put(self.fizbat_queue_path_metadata, - '7e55e1a7e', body=doc) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_custom_metadata(self): - self.simulate_put(self.fizbat_queue_path, '480924') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Set - doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' - - max_size = self.transport_cfg.max_queue_metadata - padding_len = max_size - (len(doc) - 2) - - doc = doc.format(pad='x' * padding_len) - self.simulate_put(self.fizbat_queue_path_metadata, '480924', body=doc) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Get - result = self.simulate_get(self.fizbat_queue_path_metadata, '480924') - result_doc = jsonutils.loads(result[0]) - self.assertEqual(jsonutils.loads(doc), result_doc) - 
self.assertEqual(falcon.HTTP_200, self.srmock.status) - - def test_update_metadata(self): - xyz_queue_path = self.url_prefix + '/queues/xyz' - xyz_queue_path_metadata = xyz_queue_path + '/metadata' - - # Create - project_id = '480924' - self.simulate_put(xyz_queue_path, project_id) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Set meta - doc1 = '{"messages": {"ttl": 600}}' - self.simulate_put(xyz_queue_path_metadata, project_id, body=doc1) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Update - doc2 = '{"messages": {"ttl": 100}}' - self.simulate_put(xyz_queue_path_metadata, project_id, body=doc2) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Get - result = self.simulate_get(xyz_queue_path_metadata, project_id) - result_doc = jsonutils.loads(result[0]) - - self.assertEqual(jsonutils.loads(doc2), result_doc) - self.assertEqual(xyz_queue_path_metadata, - self.srmock.headers_dict['Content-Location']) - - def test_list(self): - arbitrary_number = 644079696574693 - project_id = str(arbitrary_number) - - # NOTE(kgriffs): It's important that this one sort after the one - # above. This is in order to prove that bug/1236605 is fixed, and - # stays fixed! - alt_project_id = str(arbitrary_number + 1) - - # List empty - self.simulate_get(self.queue_path, project_id) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Payload exceeded - self.simulate_get(self.queue_path, project_id, query_string='limit=21') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Create some - def create_queue(name, project_id, body): - uri = self.queue_path + '/' + name - self.simulate_put(uri, project_id) - self.simulate_put(uri + '/metadata', project_id, body=body) - - create_queue('g1', None, '{"answer": 42}') - create_queue('g2', None, '{"answer": 42}') - - create_queue('q1', project_id, '{"node": 31}') - create_queue('q2', project_id, '{"node": 32}') - create_queue('q3', project_id, '{"node": 33}') - - create_queue('q3', alt_project_id, '{"alt": 1}') - - # List (global queues) - result = self.simulate_get(self.queue_path, None, - query_string='limit=2&detailed=true') - - result_doc = jsonutils.loads(result[0]) - queues = result_doc['queues'] - self.assertEqual(2, len(queues)) - - for queue in queues: - self.assertEqual({'answer': 42}, queue['metadata']) - - # List (limit) - result = self.simulate_get(self.queue_path, project_id, - query_string='limit=2') - - result_doc = jsonutils.loads(result[0]) - self.assertEqual(2, len(result_doc['queues'])) - - # List (no metadata, get all) - result = self.simulate_get(self.queue_path, - project_id, query_string='limit=5') - - result_doc = jsonutils.loads(result[0]) - [target, params] = result_doc['links'][0]['href'].split('?') - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(self.queue_path + '?limit=5', - self.srmock.headers_dict['Content-Location']) - - # Ensure we didn't pick up the queue from the alt project. 
- queues = result_doc['queues'] - self.assertEqual(3, len(queues)) - - for queue in queues: - self.simulate_get(queue['href'] + '/metadata', project_id) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - self.simulate_get(queue['href'] + '/metadata', 'imnothere') - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - self.assertNotIn('metadata', queue) - - # List with metadata - result = self.simulate_get(self.queue_path, project_id, - query_string='detailed=true') - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - result_doc = jsonutils.loads(result[0]) - [target, params] = result_doc['links'][0]['href'].split('?') - - queue = result_doc['queues'][0] - result = self.simulate_get(queue['href'] + '/metadata', project_id) - result_doc = jsonutils.loads(result[0]) - self.assertEqual(queue['metadata'], result_doc) - self.assertEqual({'node': 31}, result_doc) - - # List tail - self.simulate_get(target, project_id, query_string=params) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # List manually-constructed tail - self.simulate_get(target, project_id, query_string='marker=zzz') - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_list_returns_503_on_nopoolfound_exception(self): - arbitrary_number = 644079696574693 - project_id = str(arbitrary_number) - header = { - 'X-Project-ID': project_id, - } - - queue_controller = self.boot.storage.queue_controller - - with mock.patch.object(queue_controller, 'list') as mock_queue_list: - - def queue_generator(): - raise storage_errors.NoPoolFound() - - # This generator tries to be like queue controller list generator - # in some ways. - def fake_generator(): - yield queue_generator() - yield {} - mock_queue_list.return_value = fake_generator() - self.simulate_get(self.queue_path, headers=header) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - -class TestQueueLifecycleFaultyDriver(base.V1BaseFaulty): - - config_file = 'wsgi_faulty.conf' - - def test_simple(self): - gumshoe_queue_path = self.url_prefix + '/queues/gumshoe' - doc = '{"messages": {"ttl": 600}}' - self.simulate_put(gumshoe_queue_path, '480924', body=doc) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - location = ('Location', gumshoe_queue_path) - self.assertNotIn(location, self.srmock.headers) - - result = self.simulate_get(gumshoe_queue_path + '/metadata', '480924') - result_doc = jsonutils.loads(result[0]) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - self.assertNotEqual(result_doc, jsonutils.loads(doc)) - - self.simulate_get(gumshoe_queue_path + '/stats', '480924') - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_get(self.url_prefix + '/queues', '480924') - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_delete(gumshoe_queue_path, '480924') - self.assertEqual(falcon.HTTP_503, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1/test_validation.py b/zaqar/tests/unit/transport/wsgi/v1/test_validation.py deleted file mode 100644 index ad50c678..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1/test_validation.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-from oslo_utils import uuidutils
-
-import falcon
-
-from zaqar.tests.unit.transport.wsgi import base
-
-
-class TestValidation(base.V1Base):
-
-    config_file = 'wsgi_mongodb_validation.conf'
-
-    def setUp(self):
-        super(TestValidation, self).setUp()
-
-        self.project_id = '7e55e1a7e'
-
-        self.queue_path = self.url_prefix + '/queues/noein'
-        self.simulate_put(self.queue_path, self.project_id)
-
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-        }
-
-    def tearDown(self):
-        self.simulate_delete(self.queue_path, self.project_id)
-        super(TestValidation, self).tearDown()
-
-    def test_metadata_deserialization(self):
-        # Normal case
-        self.simulate_put(self.queue_path + '/metadata',
-                          self.project_id,
-                          body='{"timespace": "Shangri-la"}')
-
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        # Too long
-        max_queue_metadata = 64
-
-        doc_tmpl = '{{"Dragon Torc":"{0}"}}'
-        doc_tmpl_ws = '{{ "Dragon Torc" : "{0}" }}'  # with whitespace
-        envelope_length = len(doc_tmpl.format(''))
-
-        for tmpl in doc_tmpl, doc_tmpl_ws:
-            gen = '0' * (max_queue_metadata - envelope_length + 1)
-            doc = tmpl.format(gen)
-            self.simulate_put(self.queue_path + '/metadata',
-                              self.project_id,
-                              body=doc)
-
-            self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_message_deserialization(self):
-        # Normal case
-        self.simulate_post(self.queue_path + '/messages',
-                           self.project_id,
-                           body='[{"body": "Dragon Knights", "ttl": 100}]',
-                           headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Both message bodies are too long
-        max_messages_post_size = 256
-
-        obj = {'a': 0, 'b': ''}
-        envelope_length = len(json.dumps(obj, separators=(',', ':')))
-        obj['b'] = 'x' * (max_messages_post_size - envelope_length + 1)
-
-        for long_body in ('a' * (max_messages_post_size - 2 + 1), obj):
-            doc = json.dumps([{'body': long_body, 'ttl': 100}])
-            self.simulate_post(self.queue_path + '/messages',
-                               self.project_id,
-                               body=doc,
-                               headers=self.headers)
-
-            self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_request_without_client_id(self):
-        # Unlike the newer APIs (v1.1 and v2), a request with no Client-ID
-        # header does not result in a 400 error.
-        empty_headers = {}
-        self.simulate_put(self.queue_path,
-                          self.project_id,
-                          headers=empty_headers)
-        # Queue was already created by setUp, expecting 204 response code.
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_request_without_client_id_if_resource_name_contains_v2_text(self):
-        empty_headers = {}
-        queue_path_with_v2 = self.url_prefix + '/queues/my_name_is_v2'
-        self.simulate_put(queue_path_with_v2,
-                          self.project_id,
-                          headers=empty_headers)
-        self.addCleanup(self.simulate_delete, queue_path_with_v2,
-                        self.project_id)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_queue_metadata_putting(self):
-        # Ensure setting reserved queue attributes (whose names start with
-        # '_') is not allowed in API v1.
-
-        # Try to set the real _default_message_ttl queue attribute.
-        self.simulate_put(self.queue_path + '/metadata',
-                          self.project_id,
-                          body='{"_default_message_ttl": 60}')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Try to set a fictional queue attribute.
-        self.simulate_put(self.queue_path + '/metadata',
-                          self.project_id,
-                          body='{"_min_message_niceness": 9000}')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/__init__.py b/zaqar/tests/unit/transport/wsgi/v1_1/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_auth.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_auth.py
deleted file mode 100644
index cf844e0a..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_auth.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Test Auth."""
-
-
-import falcon
-from falcon import testing
-from keystonemiddleware import auth_token
-from oslo_utils import uuidutils
-
-from zaqar.tests.unit.transport.wsgi import base
-
-
-class TestAuth(base.V1_1Base):
-
-    config_file = 'keystone_auth.conf'
-
-    def setUp(self):
-        super(TestAuth, self).setUp()
-        self.headers = {'Client-ID': uuidutils.generate_uuid()}
-
-    def test_auth_install(self):
-        self.assertIsInstance(self.app._auth_app, auth_token.AuthProtocol)
-
-    def test_non_authenticated(self):
-        env = testing.create_environ(self.url_prefix + '/480924/queues/',
-                                     method='GET',
-                                     headers=self.headers)
-
-        self.app(env, self.srmock)
-        self.assertEqual(falcon.HTTP_401, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_claims.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_claims.py
deleted file mode 100644
index 4c096b19..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_claims.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import datetime -import json - -import ddt -import falcon -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -from testtools import matchers - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@ddt.ddt -class TestClaimsMongoDB(base.V1_1Base): - - config_file = 'wsgi_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestClaimsMongoDB, self).setUp() - - self.default_claim_ttl = self.boot.transport._defaults.claim_ttl - self.project_id = '737_abc8332832' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - self.queue_path = self.url_prefix + '/queues/fizbit' - self.claims_path = self.queue_path + '/claims' - self.messages_path = self.queue_path + '/messages' - - doc = json.dumps({"_ttl": 60}) - - self.simulate_put(self.queue_path, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - doc = json.dumps({'messages': [{'body': 239, 'ttl': 300}] * 10}) - self.simulate_post(self.queue_path + '/messages', - body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def tearDown(self): - storage = self.boot.storage._storage - control = self.boot.control - connection = storage.connection - - connection.drop_database(control.queues_database) - - for db in storage.message_databases: - connection.drop_database(db) - self.simulate_delete(self.queue_path, headers=self.headers) - - super(TestClaimsMongoDB, self).tearDown() - - @ddt.data('[', '[]', '.', '"fail"') - def test_bad_claim(self, doc): - self.simulate_post(self.claims_path, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - href = self._get_a_claim() - - self.simulate_patch(href, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_exceeded_claim(self): - self.simulate_post(self.claims_path, - body='{"ttl": 100, "grace": 60}', - query_string='limit=21', headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60)) - def test_unacceptable_ttl_or_grace(self, ttl_grace): - ttl, grace = ttl_grace - self.simulate_post(self.claims_path, - body=json.dumps({'ttl': ttl, 'grace': grace}), - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 59, 43201) - def test_unacceptable_new_ttl(self, ttl): - href = self._get_a_claim() - - self.simulate_patch(href, - body=json.dumps({'ttl': ttl}), - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_default_ttl_and_grace(self): - self.simulate_post(self.claims_path, - body='{}', headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - body = self.simulate_get(self.srmock.headers_dict['location'], - headers=self.headers) - - claim = jsonutils.loads(body[0]) - self.assertEqual(self.default_claim_ttl, claim['ttl']) - - def _get_a_claim(self): - doc = '{"ttl": 100, "grace": 60}' - self.simulate_post(self.claims_path, body=doc, headers=self.headers) - return self.srmock.headers_dict['Location'] - - def test_lifecycle(self): - doc = '{"ttl": 100, "grace": 60}' - - # First, claim some messages - body = self.simulate_post(self.claims_path, body=doc, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - claimed = jsonutils.loads(body[0])['messages'] - claim_href 
= self.srmock.headers_dict['Location'] - message_href, params = claimed[0]['href'].split('?') - - # No more messages to claim - self.simulate_post(self.claims_path, body=doc, - query_string='limit=3', headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Listing messages, by default, won't include claimed, will echo - body = self.simulate_get(self.messages_path, - headers=self.headers, - query_string="echo=true") - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - # Listing messages, by default, won't include claimed, won't echo - body = self.simulate_get(self.messages_path, - headers=self.headers, - query_string="echo=false") - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - # List messages, include_claimed, but don't echo - body = self.simulate_get(self.messages_path, - query_string='include_claimed=true' - '&echo=false', - headers=self.headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - # List messages with a different client-id and echo=false. - # Should return some messages - headers = self.headers.copy() - headers["Client-ID"] = uuidutils.generate_uuid() - body = self.simulate_get(self.messages_path, - query_string='include_claimed=true' - '&echo=false', - headers=headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # Include claimed messages this time, and echo - body = self.simulate_get(self.messages_path, - query_string='include_claimed=true' - '&echo=true', - headers=self.headers) - listed = jsonutils.loads(body[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(len(claimed), len(listed['messages'])) - - now = timeutils.utcnow() + datetime.timedelta(seconds=10) - timeutils_utcnow = 'oslo_utils.timeutils.utcnow' - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - body = self.simulate_get(claim_href, headers=self.headers) - - claim = jsonutils.loads(body[0]) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(100, claim['ttl']) - # NOTE(cpp-cabrera): verify that claim age is non-negative - self.assertThat(claim['age'], matchers.GreaterThan(-1)) - - # Try to delete the message without submitting a claim_id - self.simulate_delete(message_href, headers=self.headers) - self.assertEqual(falcon.HTTP_403, self.srmock.status) - - # Delete the message and its associated claim - self.simulate_delete(message_href, - query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Try to get it from the wrong project - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'bogusproject' - } - self.simulate_get(message_href, query_string=params, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Get the message - self.simulate_get(message_href, query_string=params, - headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Update the claim - new_claim_ttl = '{"ttl": 60, "grace": 60}' - creation = timeutils.utcnow() - self.simulate_patch(claim_href, body=new_claim_ttl, - headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Get the claimed messages (again) - body = self.simulate_get(claim_href, headers=self.headers) - query = timeutils.utcnow() - claim = jsonutils.loads(body[0]) - message_href, params = claim['messages'][0]['href'].split('?') - - self.assertEqual(60, claim['ttl']) - 
estimated_age = timeutils.delta_seconds(creation, query) - self.assertGreater(estimated_age, claim['age']) - - # Delete the claim - self.simulate_delete(claim['href'], headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Try to delete a message with an invalid claim ID - self.simulate_delete(message_href, - query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Make sure it wasn't deleted! - self.simulate_get(message_href, query_string=params, - headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # Try to get a claim that doesn't exist - self.simulate_get(claim['href'], headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Try to update a claim that doesn't exist - self.simulate_patch(claim['href'], body=doc, - headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_post_claim_nonexistent_queue(self): - path = self.url_prefix + '/queues/nonexistent/claims' - self.simulate_post(path, - body='{"ttl": 100, "grace": 60}', - headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_get_claim_nonexistent_queue(self): - path = self.url_prefix + '/queues/nonexistent/claims/aaabbbba' - self.simulate_get(path, headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # NOTE(cpp-cabrera): regression test against bug #1203842 - def test_get_nonexistent_claim_404s(self): - self.simulate_get(self.claims_path + '/a', headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_delete_nonexistent_claim_204s(self): - self.simulate_delete(self.claims_path + '/a', - headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_patch_nonexistent_claim_404s(self): - patch_data = json.dumps({'ttl': 100}) - self.simulate_patch(self.claims_path + '/a', body=patch_data, - headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - -class TestClaimsFaultyDriver(base.V1_1BaseFaulty): - - config_file = 'wsgi_faulty.conf' - - def test_simple(self): - self.project_id = '480924abc_' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - - claims_path = self.url_prefix + '/queues/fizbit/claims' - doc = '{"ttl": 100, "grace": 60}' - - self.simulate_post(claims_path, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_get(claims_path + '/nichts', headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_patch(claims_path + '/nichts', body=doc, - headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_delete(claims_path + '/foo', headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_default_limits.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_default_limits.py deleted file mode 100644 index 5d1b39c1..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1_1/test_default_limits.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import contextlib - -import falcon -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from zaqar import storage -from zaqar.tests.unit.transport.wsgi import base - - -class TestDefaultLimits(base.V1_1Base): - - config_file = 'wsgi_mongodb_default_limits.conf' - - def setUp(self): - super(TestDefaultLimits, self).setUp() - - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': '%s_' % uuidutils.generate_uuid() - } - self.queue_path = self.url_prefix + '/queues' - self.q1_queue_path = self.queue_path + '/' + uuidutils.generate_uuid() - self.messages_path = self.q1_queue_path + '/messages' - self.claims_path = self.q1_queue_path + '/claims' - - self.simulate_put(self.q1_queue_path, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def tearDown(self): - self.simulate_delete(self.queue_path, headers=self.headers) - super(TestDefaultLimits, self).tearDown() - - def test_queue_listing(self): - # 2 queues to list - self.simulate_put(self.queue_path + '/q2', headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - with self._prepare_queues(storage.DEFAULT_QUEUES_PER_PAGE + 1): - result = self.simulate_get(self.queue_path, headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - queues = jsonutils.loads(result[0])['queues'] - self.assertEqual(storage.DEFAULT_QUEUES_PER_PAGE, len(queues)) - - def test_message_listing_different_id(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1) - - headers = self.headers.copy() - headers['Client-ID'] = uuidutils.generate_uuid() - result = self.simulate_get(self.messages_path, - headers=headers, - query_string='echo=false') - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - messages = jsonutils.loads(result[0])['messages'] - self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages)) - - def test_message_listing_same_id(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1) - result = self.simulate_get(self.messages_path, - headers=self.headers, - query_string='echo=false') - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(result) - - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1) - result = self.simulate_get(self.messages_path, - headers=self.headers, - query_string='echo=true') - - messages = jsonutils.loads(result[0])['messages'] - self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages)) - - def test_claim_creation(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_CLAIM + 1) - - result = self.simulate_post(self.claims_path, - body='{"ttl": 60, "grace": 60}', - headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - messages = jsonutils.loads(result[0])['messages'] - self.assertEqual(storage.DEFAULT_MESSAGES_PER_CLAIM, len(messages)) - - @contextlib.contextmanager - def _prepare_queues(self, count): - queue_paths = [self.queue_path + '/multi-{0}'.format(i) - for i in range(count)] - - for path in queue_paths: - self.simulate_put(path, headers=self.headers) - 
self.assertEqual(falcon.HTTP_201, self.srmock.status) - - yield - - for path in queue_paths: - self.simulate_delete(path, headers=self.headers) - - def _prepare_messages(self, count): - doc = {'messages': [{'body': 239, 'ttl': 300}] * count} - body = jsonutils.dumps(doc) - self.simulate_post(self.messages_path, body=body, - headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_flavors.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_flavors.py deleted file mode 100644 index 4f8dcaaa..00000000 --- a/zaqar/tests/unit/transport/wsgi/v1_1/test_flavors.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import contextlib -import uuid - -import ddt -import falcon -from oslo_serialization import jsonutils - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@contextlib.contextmanager -def flavor(test, name, pool_group, capabilities={}): - """A context manager for constructing a flavor for use in testing. - - Deletes the flavor after exiting the context. - - :param test: Must expose simulate_* methods - :param name: Name for this flavor - :type name: six.text_type - :type pool_group: six.text_type - :type capabilities: dict - :returns: (name, uri, capabilities) - :rtype: see above - - """ - - doc = {'pool_group': pool_group, 'capabilities': capabilities} - path = test.url_prefix + '/flavors/' + name - - test.simulate_put(path, body=jsonutils.dumps(doc)) - - try: - yield name, pool_group, capabilities - - finally: - test.simulate_delete(path) - - -@contextlib.contextmanager -def flavors(test, count, pool_group): - """A context manager for constructing flavors for use in testing. - - Deletes the flavors after exiting the context. 
-
-    :param test: Must expose simulate_* methods
-    :param count: Number of flavors to create
-    :type count: int
-    :returns: [(path, capabilities, name)]
-    :rtype: [(six.text_type, dict, six.text_type)]
-
-    """
-
-    base = test.url_prefix + '/flavors/'
-    args = sorted([(base + str(i), {str(i): i}, str(i)) for i in range(count)],
-                  key=lambda tup: tup[2])
-    for path, capabilities, _ in args:
-        doc = {'pool_group': pool_group, 'capabilities': capabilities}
-        test.simulate_put(path, body=jsonutils.dumps(doc))
-
-    try:
-        yield args
-    finally:
-        for path, _, _ in args:
-            test.simulate_delete(path)
-
-
-@ddt.ddt
-class TestFlavorsMongoDB(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb_pooled.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestFlavorsMongoDB, self).setUp()
-        self.queue = 'test-queue'
-        self.queue_path = self.url_prefix + '/queues/' + self.queue
-
-        self.pool = 'mypool'
-        self.pool_group = 'mypool-group'
-        self.pool_path = self.url_prefix + '/pools/' + self.pool
-        self.pool_doc = {'weight': 100,
-                         'group': self.pool_group,
-                         'uri': self.mongodb_url}
-        self.simulate_put(self.pool_path, body=jsonutils.dumps(self.pool_doc))
-
-        self.flavor = 'test-flavor'
-        self.doc = {'capabilities': {}, 'pool_group': self.pool_group}
-        self.flavor_path = self.url_prefix + '/flavors/' + self.flavor
-        self.simulate_put(self.flavor_path, body=jsonutils.dumps(self.doc))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def tearDown(self):
-        self.simulate_delete(self.queue_path)
-        self.simulate_delete(self.flavor_path)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_delete(self.pool_path)
-        super(TestFlavorsMongoDB, self).tearDown()
-
-    def test_put_flavor_works(self):
-        name = str(uuid.uuid1())
-        with flavor(self, name, self.doc['pool_group']):
-            self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_put_raises_if_missing_fields(self):
-        path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
-        self.simulate_put(path, body=jsonutils.dumps({}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_put(path,
-                          body=jsonutils.dumps({'capabilities': {}}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(1, 2**32+1, [])
-    def test_put_raises_if_invalid_pool(self, pool):
-        path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
-        self.simulate_put(path,
-                          body=jsonutils.dumps({'pool_group': pool}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 'wee', [])
-    def test_put_raises_if_invalid_capabilities(self, capabilities):
-        path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
-        doc = {'pool_group': 'a', 'capabilities': capabilities}
-        self.simulate_put(path, body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_put_existing_overwrites(self):
-        # NOTE(cabrera): setUp creates default flavor
-        expect = self.doc
-        self.simulate_put(self.flavor_path,
-                          body=jsonutils.dumps(expect))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        result = self.simulate_get(self.flavor_path)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        doc = jsonutils.loads(result[0])
-        self.assertEqual(expect['pool_group'], doc['pool_group'])
-
-    def test_create_flavor_no_pool_group(self):
-        self.simulate_delete(self.flavor_path)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_delete(self.pool_path)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        resp = self.simulate_put(self.flavor_path,
-                                 body=jsonutils.dumps(self.doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        self.assertEqual(
-            {'description': 'Flavor test-flavor could not be created. '
-                            'Pool group mypool-group does not exist',
-             'title': 'Unable to create'},
-            jsonutils.loads(resp[0]))
-
-    def test_delete_works(self):
-        self.simulate_delete(self.flavor_path)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_get(self.flavor_path)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_get_nonexisting_raises_404(self):
-        self.simulate_get(self.url_prefix + '/flavors/nonexisting')
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def _flavor_expect(self, flavor, xhref, xpool):
-        self.assertIn('href', flavor)
-        self.assertIn('name', flavor)
-        self.assertEqual(xhref, flavor['href'])
-        self.assertIn('pool_group', flavor)
-        self.assertEqual(xpool, flavor['pool_group'])
-
-    def test_get_works(self):
-        result = self.simulate_get(self.flavor_path)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        flavor = jsonutils.loads(result[0])
-        self._flavor_expect(flavor, self.flavor_path, self.doc['pool_group'])
-
-    def test_detailed_get_works(self):
-        result = self.simulate_get(self.flavor_path,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        flavor = jsonutils.loads(result[0])
-        self._flavor_expect(flavor, self.flavor_path, self.doc['pool_group'])
-        self.assertIn('capabilities', flavor)
-        self.assertEqual({}, flavor['capabilities'])
-
-    def test_patch_raises_if_missing_fields(self):
-        self.simulate_patch(self.flavor_path,
-                            body=jsonutils.dumps({'location': 1}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def _patch_test(self, doc):
-        self.simulate_patch(self.flavor_path,
-                            body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        result = self.simulate_get(self.flavor_path,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        flavor = jsonutils.loads(result[0])
-        self._flavor_expect(flavor, self.flavor_path, doc['pool_group'])
-        self.assertEqual(doc['capabilities'], flavor['capabilities'])
-
-    def test_patch_works(self):
-        doc = {'pool_group': 'my-pool-group', 'capabilities': {'a': 1}}
-        self._patch_test(doc)
-
-    def test_patch_works_with_extra_fields(self):
-        doc = {'pool_group': 'my-pool-group', 'capabilities': {'a': 1},
-               'location': 100, 'partition': 'taco'}
-        self._patch_test(doc)
-
-    @ddt.data(-1, 2**32+1, [])
-    def test_patch_raises_400_on_invalid_pool_group(self, pool_group):
-        self.simulate_patch(self.flavor_path,
-                            body=jsonutils.dumps({'pool_group': pool_group}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 'wee', [])
-    def test_patch_raises_400_on_invalid_capabilities(self, capabilities):
-        doc = {'capabilities': capabilities}
-        self.simulate_patch(self.flavor_path, body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_patch_raises_404_if_flavor_not_found(self):
-        self.simulate_patch(self.url_prefix + '/flavors/notexists',
-                            body=jsonutils.dumps({'pool_group': 'test'}))
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_empty_listing(self):
-        self.simulate_delete(self.flavor_path)
-        result = self.simulate_get(self.url_prefix + '/flavors')
-        results = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self.assertEqual(0, len(results['flavors']))
-        self.assertIn('links', results)
-
-    def _listing_test(self, count=10, limit=10,
-                      marker=None, detailed=False):
-        # NOTE(cpp-cabrera): delete initial flavor - it will interfere
-        # with listing tests
-        self.simulate_delete(self.flavor_path)
-        query = 'limit={0}&detailed={1}'.format(limit, detailed)
-        if marker:
-            query += '&marker={0}'.format(marker)
-
-        with flavors(self, count, self.doc['pool_group']) as expected:
-            result = self.simulate_get(self.url_prefix + '/flavors',
-                                       query_string=query)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            results = jsonutils.loads(result[0])
-            self.assertIsInstance(results, dict)
-            self.assertIn('flavors', results)
-            self.assertIn('links', results)
-            flavors_list = results['flavors']
-
-            link = results['links'][0]
-            self.assertEqual('next', link['rel'])
-            href = falcon.uri.parse_query_string(link['href'].split('?')[1])
-            self.assertIn('marker', href)
-            self.assertEqual(str(limit), href['limit'])
-            self.assertEqual(str(detailed).lower(), href['detailed'])
-
-            next_query_string = ('marker={marker}&limit={limit}'
-                                 '&detailed={detailed}').format(**href)
-            next_result = self.simulate_get(link['href'].split('?')[0],
-                                            query_string=next_query_string)
-            next_flavors = jsonutils.loads(next_result[0])
-            next_flavors_list = next_flavors['flavors']
-
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            self.assertIn('links', next_flavors)
-            if limit < count:
-                self.assertEqual(min(limit, count-limit),
-                                 len(next_flavors_list))
-            else:
-                self.assertEqual(0, len(next_flavors_list))
-
-            self.assertEqual(min(limit, count), len(flavors_list))
-            for i, s in enumerate(flavors_list + next_flavors_list):
-                expect = expected[i]
-                path, capabilities = expect[:2]
-                self._flavor_expect(s, path, self.doc['pool_group'])
-                if detailed:
-                    self.assertIn('capabilities', s)
-                    self.assertEqual(s['capabilities'], capabilities)
-                else:
-                    self.assertNotIn('capabilities', s)
-
-    def test_listing_works(self):
-        self._listing_test()
-
-    def test_detailed_listing_works(self):
-        self._listing_test(detailed=True)
-
-    @ddt.data(1, 5, 10, 15)
-    def test_listing_works_with_limit(self, limit):
-        self._listing_test(count=15, limit=limit)
-
-    def test_listing_marker_is_respected(self):
-        self.simulate_delete(self.flavor_path)
-
-        with flavors(self, 10, self.doc['pool_group']) as expected:
-            result = self.simulate_get(self.url_prefix + '/flavors',
-                                       query_string='marker=3')
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            flavor_list = jsonutils.loads(result[0])['flavors']
-            self.assertEqual(6, len(flavor_list))
-            path, capabilities = expected[4][:2]
-            self._flavor_expect(flavor_list[0], path, self.doc['pool_group'])
-
-    def test_queue_create_works(self):
-        metadata = {'_flavor': self.flavor}
-        self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_queue_create_no_flavor(self):
-        metadata = {'_flavor': self.flavor}
-
-        self.simulate_delete(self.flavor_path)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_health.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_health.py
deleted file mode 100644
index 51c99d13..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_health.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2014 Catalyst IT Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import ddt
-import falcon
-import mock
-from oslo_serialization import jsonutils
-
-from zaqar.storage import errors
-import zaqar.storage.mongodb as mongo
-from zaqar import tests as testing
-from zaqar.tests.unit.transport.wsgi import base
-
-
-@ddt.ddt
-class TestHealthMongoDB(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestHealthMongoDB, self).setUp()
-
-    def test_basic(self):
-        path = self.url_prefix + '/health'
-        body = self.simulate_get(path)
-        health = jsonutils.loads(body[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self.assertTrue(health['storage_reachable'])
-        self.assertIsNotNone(health['message_volume'])
-        for op in health['operation_status']:
-            self.assertTrue(health['operation_status'][op]['succeeded'])
-
-    @mock.patch.object(mongo.driver.DataDriver, '_health')
-    def test_message_volume(self, mock_driver_get):
-        def _health():
-            KPI = {}
-            KPI['message_volume'] = {'free': 1, 'claimed': 2, 'total': 3}
-            return KPI
-
-        mock_driver_get.side_effect = _health
-
-        path = self.url_prefix + '/health'
-        body = self.simulate_get(path)
-        health = jsonutils.loads(body[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        message_volume = health['message_volume']
-        self.assertEqual(1, message_volume['free'])
-        self.assertEqual(2, message_volume['claimed'])
-        self.assertEqual(3, message_volume['total'])
-
-    @mock.patch.object(mongo.messages.MessageController, 'delete')
-    def test_operation_status(self, mock_messages_delete):
-        mock_messages_delete.side_effect = errors.NotPermitted()
-
-        path = self.url_prefix + '/health'
-        body = self.simulate_get(path)
-        health = jsonutils.loads(body[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        op_status = health['operation_status']
-        for op in op_status.keys():
-            if op == 'delete_messages':
-                self.assertFalse(op_status[op]['succeeded'])
-                self.assertIsNotNone(op_status[op]['ref'])
-            else:
-                self.assertTrue(op_status[op]['succeeded'])
-
-
-class TestHealthFaultyDriver(base.V1_1BaseFaulty):
-
-    config_file = 'wsgi_faulty.conf'
-
-    def test_simple(self):
-        path = self.url_prefix + '/health'
-        self.simulate_get(path)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_home.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_home.py
deleted file mode 100644
index 0b4028a8..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_home.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import falcon
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-import six.moves.urllib.parse as urlparse
-
-from zaqar.tests.unit.transport.wsgi import base
-
-
-class TestHomeDocument(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    def test_json_response(self):
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': '8383830383abc_'
-        }
-        body = self.simulate_get(self.url_prefix, headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        content_type = self.srmock.headers_dict['Content-Type']
-        self.assertEqual('application/json-home', content_type)
-
-        try:
-            jsonutils.loads(body[0])
-        except ValueError:
-            self.fail('Home document is not valid JSON')
-
-    def test_href_template(self):
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': '8383830383'
-        }
-        body = self.simulate_get(self.url_prefix, headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        resp = jsonutils.loads(body[0])
-        queue_href_template = resp['resources']['rel/queue']['href-template']
-        path_1 = 'https://zaqar.example.com' + self.url_prefix
-        path_2 = 'https://zaqar.example.com' + self.url_prefix + '/'
-
-        # Verify all the href template start with the correct version prefix
-        def get_href_or_template(resource):
-            return resource.get('href-template', '') or resource['href']
-
-        for resource in list(resp['resources']):
-            self.assertTrue(
-                get_href_or_template(resp['resources'][resource]).
-                startswith(self.url_prefix))
-
-        url = urlparse.urljoin(path_1, queue_href_template)
-        expected = ('https://zaqar.example.com' + self.url_prefix +
-                    '/queues/foo')
-        self.assertEqual(expected, url.format(queue_name='foo'))
-
-        url = urlparse.urljoin(path_2, queue_href_template)
-        self.assertEqual(expected, url.format(queue_name='foo'))
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_media_type.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_media_type.py
deleted file mode 100644
index a75855de..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_media_type.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import uuid
-
-import falcon
-from falcon import testing
-from oslo_serialization import jsonutils
-
-from zaqar.tests.unit.transport.wsgi import base
-
-
-class TestMediaType(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    def test_json_only_endpoints_with_wrong_accept_header(self):
-        endpoints = (
-            ('GET', self.url_prefix + '/queues'),
-            ('GET', self.url_prefix + '/queues/nonexistent/stats'),
-            ('POST', self.url_prefix + '/queues/nonexistent/messages'),
-            ('GET', self.url_prefix + '/queues/nonexistent/messages/deadbeaf'),
-            ('POST', self.url_prefix + '/queues/nonexistent/claims'),
-            ('GET', self.url_prefix + '/queues/nonexistent/claims/0ad'),
-            ('GET', self.url_prefix + '/health'),
-        )
-
-        for method, endpoint in endpoints:
-            headers = {
-                'Client-ID': str(uuid.uuid4()),
-                'Accept': 'application/xml',
-            }
-
-            env = testing.create_environ(endpoint,
-                                         method=method,
-                                         headers=headers)
-
-            self.app(env, self.srmock)
-            self.assertEqual(falcon.HTTP_406, self.srmock.status)
-
-    def test_request_with_body_and_urlencoded_contenttype_header_fails(self):
-        # NOTE(Eva-i): this test case makes sure wsgi 'before' hook
-        # "require_content_type_be_non_urlencoded" works to prevent
-        # bug/1547100.
-        eww_queue_path = self.url_prefix + '/queues/eww'
-        eww_queue_messages_path = eww_queue_path + '/messages'
-        sample_message = jsonutils.dumps({'messages': [{'body': {'eww!'},
-                                                        'ttl': 200}]})
-        bad_headers = {
-            'Client-ID': str(uuid.uuid4()),
-            'Content-Type': 'application/x-www-form-urlencoded',
-        }
-
-        # Create queue request with bad headers. Should still work, because it
-        # has no body.
-        self.simulate_put(eww_queue_path, headers=bad_headers)
-        self.addCleanup(self.simulate_delete, eww_queue_path,
-                        headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Post message request with good headers. Should work.
-        self.simulate_post(eww_queue_messages_path, body=sample_message,
-                           headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Post message request with bad headers. Should not work.
-        self.simulate_post(eww_queue_messages_path, body=sample_message,
-                           headers=bad_headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_messages.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_messages.py
deleted file mode 100644
index 01710d46..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_messages.py
+++ /dev/null
@@ -1,643 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-import uuid
-
-import ddt
-import falcon
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-import six
-from testtools import matchers
-
-from zaqar import tests as testing
-from zaqar.tests.unit.transport.wsgi import base
-from zaqar.transport import validation
-
-
-@ddt.ddt
-class TestMessagesMongoDB(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestMessagesMongoDB, self).setUp()
-
-        self.default_message_ttl = self.boot.transport._defaults.message_ttl
-
-        if self.conf.pooling:
-            for i in range(4):
-                uri = "%s/%s" % (self.mongodb_url, str(i))
-                doc = {'weight': 100, 'uri': uri}
-                self.simulate_put(self.url_prefix + '/pools/' + str(i),
-                                  body=jsonutils.dumps(doc))
-                self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        self.project_id = '7e55e1a7e'
-        self.headers = {
-            'Client-ID': str(uuid.uuid4()),
-            'X-Project-ID': self.project_id
-        }
-
-        # TODO(kgriffs): Add support in self.simulate_* for a "base path"
-        # so that we don't have to concatenate against self.url_prefix
-        # all over the place.
-        self.queue_path = self.url_prefix + '/queues/fizbit'
-        self.messages_path = self.queue_path + '/messages'
-
-        doc = '{"_ttl": 60}'
-        self.simulate_put(self.queue_path, body=doc, headers=self.headers)
-
-    def tearDown(self):
-        self.simulate_delete(self.queue_path, headers=self.headers)
-        if self.conf.pooling:
-            for i in range(4):
-                self.simulate_delete(self.url_prefix + '/pools/' + str(i),
-                                     headers=self.headers)
-
-        super(TestMessagesMongoDB, self).tearDown()
-
-    def test_name_restrictions(self):
-        sample_messages = [
-            {'body': {'key': 'value'}, 'ttl': 200},
-        ]
-        messages_path = self.url_prefix + '/queues/%s/messages'
-        sample_doc = jsonutils.dumps({'messages': sample_messages})
-
-        self.simulate_post(messages_path % 'Nice-Boat_2',
-                           body=sample_doc, headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        self.simulate_post(messages_path % 'Nice-Bo@t',
-                           body=sample_doc, headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_post(messages_path % ('_niceboat' * 8),
-                           body=sample_doc, headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def _test_post(self, sample_messages):
-        sample_doc = jsonutils.dumps({'messages': sample_messages})
-
-        result = self.simulate_post(self.messages_path,
-                                    body=sample_doc, headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        result_doc = jsonutils.loads(result[0])
-
-        msg_ids = self._get_msg_ids(self.srmock.headers_dict)
-        self.assertEqual(len(sample_messages), len(msg_ids))
-
-        expected_resources = [six.text_type(self.messages_path + '/' + id)
-                              for id in msg_ids]
-        self.assertEqual(expected_resources, result_doc['resources'])
-
-        # NOTE(kgriffs): As of v1.1, "partial" is no longer given
-        # in the response document.
-        self.assertNotIn('partial', result_doc)
-
-        self.assertEqual(len(sample_messages), len(msg_ids))
-
-        lookup = dict([(m['ttl'], m['body']) for m in sample_messages])
-
-        # Test GET on the message resource directly
-        # NOTE(cpp-cabrera): force the passing of time to age a message
-        timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
-        now = timeutils.utcnow() + datetime.timedelta(seconds=10)
-        with mock.patch(timeutils_utcnow) as mock_utcnow:
-            mock_utcnow.return_value = now
-            for msg_id in msg_ids:
-                message_uri = self.messages_path + '/' + msg_id
-
-                headers = self.headers.copy()
-                headers['X-Project-ID'] = '777777'
-                # Wrong project ID
-                self.simulate_get(message_uri, headers=headers)
-                self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-                # Correct project ID
-                result = self.simulate_get(message_uri, headers=self.headers)
-                self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-                # Check message properties
-                message = jsonutils.loads(result[0])
-                self.assertEqual(message_uri, message['href'])
-                self.assertEqual(lookup[message['ttl']], message['body'])
-                self.assertEqual(msg_id, message['id'])
-
-                # no negative age
-                # NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26
-                self.assertThat(message['age'],
-                                matchers.GreaterThan(-1))
-
-        # Test bulk GET
-        query_string = 'ids=' + ','.join(msg_ids)
-        result = self.simulate_get(self.messages_path,
-                                   query_string=query_string,
-                                   headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        result_doc = jsonutils.loads(result[0])
-        expected_ttls = set(m['ttl'] for m in sample_messages)
-        actual_ttls = set(m['ttl'] for m in result_doc['messages'])
-        self.assertFalse(expected_ttls - actual_ttls)
-        actual_ids = set(m['id'] for m in result_doc['messages'])
-        self.assertFalse(set(msg_ids) - actual_ids)
-
-    def test_exceeded_payloads(self):
-        # Get a valid message id
-        self._post_messages(self.messages_path)
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-
-        # Bulk GET restriction
-        query_string = 'ids=' + ','.join([msg_id] * 21)
-        self.simulate_get(self.messages_path,
-                          query_string=query_string, headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Listing restriction
-        self.simulate_get(self.messages_path,
-                          query_string='limit=21',
-                          headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Bulk deletion restriction
-        query_string = 'ids=' + ','.join([msg_id] * 22)
-        self.simulate_delete(self.messages_path,
-                             query_string=query_string, headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_post_single(self):
-        sample_messages = [
-            {'body': {'key': 'value'}, 'ttl': 200},
-        ]
-
-        self._test_post(sample_messages)
-
-    def test_post_multiple(self):
-        sample_messages = [
-            {'body': 239, 'ttl': 100},
-            {'body': {'key': 'value'}, 'ttl': 200},
-            {'body': [1, 3], 'ttl': 300},
-        ]
-
-        self._test_post(sample_messages)
-
-    def test_post_optional_ttl(self):
-        sample_messages = {
-            'messages': [
-                {'body': 239},
-                {'body': {'key': 'value'}, 'ttl': 200},
-            ],
-        }
-
-        # Manually check default TTL is max from config
-
-        sample_doc = jsonutils.dumps(sample_messages)
-        result = self.simulate_post(self.messages_path,
-                                    body=sample_doc, headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        result_doc = jsonutils.loads(result[0])
-
-        href = result_doc['resources'][0]
-        result = self.simulate_get(href, headers=self.headers)
-        message = jsonutils.loads(result[0])
-
-        self.assertEqual(self.default_message_ttl, message['ttl'])
-
-    def test_post_to_non_ascii_queue(self):
-        # NOTE(kgriffs): This test verifies that routes with
-        # embedded queue name params go through the validation
-        # hook, regardless of the target resource.
-
-        path = self.url_prefix + u'/queues/non-ascii-n\u0153me/messages'
-
-        if six.PY2:
-            path = path.encode('utf-8')
-
-        self._post_messages(path)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_post_with_long_queue_name(self):
-        # NOTE(kgriffs): This test verifies that routes with
-        # embedded queue name params go through the validation
-        # hook, regardless of the target resource.
-
-        queues_path = self.url_prefix + '/queues/'
-
-        game_title = 'v' * validation.QUEUE_NAME_MAX_LEN
-        self.addCleanup(
-            self.simulate_delete, queues_path + game_title,
-            headers=self.headers)
-        self._post_messages(queues_path + game_title + '/messages')
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        game_title += 'v'
-        self._post_messages(queues_path + game_title + '/messages')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_post_to_missing_queue(self):
-        self.addCleanup(
-            self.simulate_delete, self.url_prefix + '/queues/nonexistent',
-            headers=self.headers)
-        self._post_messages(self.url_prefix + '/queues/nonexistent/messages')
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_get_from_missing_queue(self):
-        body = self.simulate_get(self.url_prefix +
-                                 '/queues/nonexistent/messages',
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self._empty_message_list(body)
-
-    @ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369')
-    def test_bad_client_id(self, text_id):
-        self.simulate_post(self.queue_path + '/messages',
-                           body='{"ttl": 60, "body": ""}',
-                           headers={'Client-ID': text_id})
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_get(self.queue_path + '/messages',
-                          query_string='limit=3&echo=true',
-                          headers={'Client-ID': text_id})
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(None, '[', '[]', '{}', '.')
-    def test_post_bad_message(self, document):
-        self.simulate_post(self.queue_path + '/messages',
-                           body=document,
-                           headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 59, 1209601)
-    def test_unacceptable_ttl(self, ttl):
-        doc = {'messages': [{'ttl': ttl, 'body': None}]}
-
-        self.simulate_post(self.queue_path + '/messages',
-                           body=jsonutils.dumps(doc),
-                           headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_exceeded_message_posting(self):
-        # Total (raw request) size
-        doc = {'messages': [{'body': "some body", 'ttl': 100}] * 20}
-        body = jsonutils.dumps(doc, indent=4)
-
-        max_len = self.transport_cfg.max_messages_post_size
-        long_body = body + (' ' * (max_len - len(body) + 1))
-
-        self.simulate_post(self.queue_path + '/messages',
-                           body=long_body,
-                           headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data('{"overflow": 9223372036854775808}',
-              '{"underflow": -9223372036854775809}')
-    def test_unsupported_json(self, document):
-        self.simulate_post(self.queue_path + '/messages',
-                           body=document,
-                           headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_delete(self):
-        self._post_messages(self.messages_path)
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-        target = self.messages_path + '/' + msg_id
-
-        self.simulate_get(target, headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        self.simulate_delete(target, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_get(target, headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-        # Safe to delete non-existing ones
-        self.simulate_delete(target, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_bulk_delete(self):
-        path = self.queue_path + '/messages'
-        self._post_messages(path, repeat=5)
-        [target, params] = self.srmock.headers_dict['location'].split('?')
-
-        # Deleting the whole collection is denied
-        self.simulate_delete(path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_delete(target, query_string=params, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_get(target, query_string=params, headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-        # Safe to delete non-existing ones
-        self.simulate_delete(target, query_string=params, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        # Even after the queue is gone
-        self.simulate_delete(self.queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_delete(target, query_string=params, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_list(self):
-        path = self.queue_path + '/messages'
-        self._post_messages(path, repeat=10)
-
-        query_string = 'limit=3&echo=true'
-        body = self.simulate_get(path,
-                                 query_string=query_string,
-                                 headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        cnt = 0
-        while jsonutils.loads(body[0])['messages'] != []:
-            contents = jsonutils.loads(body[0])
-            [target, params] = contents['links'][0]['href'].split('?')
-
-            for msg in contents['messages']:
-                self.simulate_get(msg['href'], headers=self.headers)
-                self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-            body = self.simulate_get(target,
-                                     query_string=params,
-                                     headers=self.headers)
-            cnt += 1
-
-        self.assertEqual(4, cnt)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self._empty_message_list(body)
-
-        # Stats
-        body = self.simulate_get(self.queue_path + '/stats',
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        message_stats = jsonutils.loads(body[0])['messages']
-
-        # NOTE(kgriffs): The other parts of the stats are tested
-        # in tests.storage.base and so are not repeated here.
-        expected_pattern = self.queue_path + '/messages/[^/]+$'
-        for message_stat_name in ('oldest', 'newest'):
-            self.assertThat(message_stats[message_stat_name]['href'],
-                            matchers.MatchesRegex(expected_pattern))
-
-        # NOTE(kgriffs): Try to get messages for a missing queue
-        body = self.simulate_get(self.url_prefix +
-                                 '/queues/nonexistent/messages',
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self._empty_message_list(body)
-
-    def test_list_with_bad_marker(self):
-        path = self.queue_path + '/messages'
-        self._post_messages(path, repeat=5)
-
-        query_string = 'limit=3&echo=true&marker=sfhlsfdjh2048'
-        body = self.simulate_get(path,
-                                 query_string=query_string,
-                                 headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self._empty_message_list(body)
-
-    def test_no_uuid(self):
-        headers = {
-            'Client-ID': "textid",
-            'X-Project-ID': '7e7e7e'
-        }
-        path = self.queue_path + '/messages'
-
-        self.simulate_post(path, body='[{"body": 0, "ttl": 100}]',
-                           headers=headers)
-
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_get(path, headers=headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_get_claimed_contains_claim_id_in_href(self):
-        path = self.queue_path
-        res = self._post_messages(path + '/messages', repeat=5)
-        for url in jsonutils.loads(res[0])['resources']:
-            message = self.simulate_get(url)
-            self.assertNotIn('claim_id', jsonutils.loads(message[0])['href'])
-
-        self.simulate_post(path + '/claims',
-                           body='{"ttl": 100, "grace": 100}',
-                           headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        for url in jsonutils.loads(res[0])['resources']:
-            message = self.simulate_get(url)
-            self.assertIn('claim_id', jsonutils.loads(message[0])['href'])
-
-    # NOTE(cpp-cabrera): regression test against bug #1210633
-    def test_when_claim_deleted_then_messages_unclaimed(self):
-        path = self.queue_path
-        self._post_messages(path + '/messages', repeat=5)
-
-        # post claim
-        self.simulate_post(path + '/claims',
-                           body='{"ttl": 100, "grace": 100}',
-                           headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        location = self.srmock.headers_dict['location']
-
-        # release claim
-        self.simulate_delete(location, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        # get unclaimed messages
-        self.simulate_get(path + '/messages',
-                          query_string='echo=true',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    # NOTE(cpp-cabrera): regression test against bug #1203842
-    def test_get_nonexistent_message_404s(self):
-        path = self.url_prefix + '/queues/notthere/messages/a'
-        self.simulate_get(path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_get_multiple_invalid_messages_404s(self):
-        path = self.url_prefix + '/queues/notthere/messages'
-        self.simulate_get(path, query_string='ids=a,b,c',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_delete_multiple_invalid_messages_204s(self):
-        path = self.url_prefix + '/queues/notthere/messages'
-        self.simulate_delete(path, query_string='ids=a,b,c',
-                             headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_delete_message_with_invalid_claim_doesnt_delete_message(self):
-        path = self.queue_path
-        resp = self._post_messages(path + '/messages', 1)
-        location = jsonutils.loads(resp[0])['resources'][0]
-
-        self.simulate_delete(location, query_string='claim_id=invalid',
-                             headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_get(location, headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_no_duplicated_messages_path_in_href(self):
-        """Test for bug 1240897."""
-
-        path = self.queue_path + '/messages'
-        self._post_messages(path, repeat=1)
-
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-
-        query_string = 'ids=%s' % msg_id
-        body = self.simulate_get(path,
-                                 query_string=query_string,
-                                 headers=self.headers)
-        messages = jsonutils.loads(body[0])
-
-        self.assertNotIn(self.queue_path + '/messages/messages',
-                         messages['messages'][0]['href'])
-
-    def _post_messages(self, target, repeat=1):
-        doc = {'messages': [{'body': 239, 'ttl': 300}] * repeat}
-
-        body = jsonutils.dumps(doc)
-        return self.simulate_post(target, body=body, headers=self.headers)
-
-    def _get_msg_id(self, headers):
-        return self._get_msg_ids(headers)[0]
-
-    def _get_msg_ids(self, headers):
-        return headers['location'].rsplit('=', 1)[-1].split(',')
-
-    @ddt.data(1, 2, 10)
-    def test_pop(self, message_count):
-
-        self._post_messages(self.messages_path, repeat=message_count)
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-        target = self.messages_path + '/' + msg_id
-
-        self.simulate_get(target, self.project_id)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        query_string = 'pop=' + str(message_count)
-        result = self.simulate_delete(self.messages_path, self.project_id,
-                                      query_string=query_string)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        result_doc = jsonutils.loads(result[0])
-
-        self.assertEqual(message_count, len(result_doc['messages']))
-
-        self.simulate_get(target, self.project_id)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    @ddt.data('', 'pop=1000000', 'pop=10&ids=1', 'pop=-1')
-    def test_pop_invalid(self, query_string):
-
-        self.simulate_delete(self.messages_path, self.project_id,
-                             query_string=query_string)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_pop_empty_queue(self):
-
-        query_string = 'pop=1'
-        result = self.simulate_delete(self.messages_path, self.project_id,
-                                      query_string=query_string)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual([], result_doc['messages'])
-
-    def test_pop_single_message(self):
-
-        self._post_messages(self.messages_path, repeat=5)
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-        target = self.messages_path + '/' + msg_id
-
-        self.simulate_get(target, self.project_id)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Pop Single message from the queue
-        query_string = 'pop=1'
-        result = self.simulate_delete(self.messages_path, self.project_id,
-                                      query_string=query_string)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Get messages from the queue & verify message count
-        query_string = 'echo=True'
-        result = self.simulate_get(self.messages_path, self.project_id,
-                                   query_string=query_string,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-        actual_msg_count = len(result_doc['messages'])
-        expected_msg_count = 4
-        self.assertEqual(expected_msg_count, actual_msg_count)
-
-
-class TestMessagesMongoDBPooled(TestMessagesMongoDB):
-    config_file = 'wsgi_mongodb_pooled.conf'
-
-    # TODO(cpp-cabrera): remove this skipTest once pooled queue
-    # listing is implemented
-    def test_list(self):
-        self.skipTest("Need to implement pooled queue listing.")
-
-
-class TestMessagesFaultyDriver(base.V1_1BaseFaulty):
-    config_file = 'wsgi_faulty.conf'
-
-    def test_simple(self):
-        project_id = 'xyz'
-        path = self.url_prefix + '/queues/fizbit/messages'
-        body = '{"messages": [{"body": 239, "ttl": 100}]}'
-        headers = {
-            'Client-ID': str(uuid.uuid4()),
-            'X-Project-ID': project_id
-        }
-
-        self.simulate_post(path,
-                           body=body,
-                           headers=headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_get(path,
-                          headers=headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_get(path + '/nonexistent', headers=headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_delete(path + '/nada', headers=headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_ping.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_ping.py
deleted file mode 100644
index abbbdb67..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_ping.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import falcon
-
-from zaqar.tests.unit.transport.wsgi import base
-
-
-class TestPing(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    def test_get(self):
-        # TODO(kgriffs): Make use of setUp for setting the URL prefix
-        # so we can just say something like:
-        #
-        #     response = self.simulate_get('/ping')
-        #
-        response = self.simulate_get('/v1.1/ping')
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-        self.assertEqual([], response)
-
-    def test_head(self):
-        response = self.simulate_head('/v1.1/ping')
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-        self.assertEqual([], response)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_pools.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_pools.py
deleted file mode 100644
index 093755da..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_pools.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import contextlib
-
-import ddt
-import falcon
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from zaqar import tests as testing
-from zaqar.tests.unit.transport.wsgi import base
-
-
-@contextlib.contextmanager
-def pool(test, name, weight, uri, group=None, options={}):
-    """A context manager for constructing a pool for use in testing.
-
-    Deletes the pool after exiting the context.
-
-    :param test: Must expose simulate_* methods
-    :param name: Name for this pool
-    :type name: six.text_type
-    :type weight: int
-    :type uri: six.text_type
-    :type options: dict
-    :returns: (name, weight, uri, options)
-    :rtype: see above
-    """
-    uri = "%s/%s" % (uri, uuidutils.generate_uuid())
-    doc = {'weight': weight, 'uri': uri,
-           'group': group, 'options': options}
-    path = test.url_prefix + '/pools/' + name
-
-    test.simulate_put(path, body=jsonutils.dumps(doc))
-    test.addCleanup(test.simulate_delete, path)
-
-    try:
-        yield name, weight, uri, group, options
-
-    finally:
-        test.simulate_delete(path)
-
-
-@contextlib.contextmanager
-def pools(test, count, uri, group):
-    """A context manager for constructing pools for use in testing.
-
-    Deletes the pools after exiting the context.
-
-    :param test: Must expose simulate_* methods
-    :param count: Number of pools to create
-    :type count: int
-    :returns: (paths, weights, uris, options)
-    :rtype: ([six.text_type], [int], [six.text_type], [dict])
-    """
-    mongo_url = uri
-    base = test.url_prefix + '/pools/'
-    args = [(base + str(i), i,
-             {str(i): i})
-            for i in range(count)]
-    for path, weight, option in args:
-        uri = "%s/%s" % (mongo_url, uuidutils.generate_uuid())
-        doc = {'weight': weight, 'uri': uri,
-               'group': group, 'options': option}
-        test.simulate_put(path, body=jsonutils.dumps(doc))
-
-    try:
-        yield args
-    finally:
-        for path, _, _ in args:
-            test.simulate_delete(path)
-
-
-@ddt.ddt
-class TestPoolsMongoDB(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb_pooled.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestPoolsMongoDB, self).setUp()
-        self.doc = {'weight': 100,
-                    'group': 'mygroup',
-                    'uri': self.mongodb_url}
-        self.pool = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        self.simulate_put(self.pool, body=jsonutils.dumps(self.doc))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def tearDown(self):
-        super(TestPoolsMongoDB, self).tearDown()
-        self.simulate_delete(self.pool)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_put_pool_works(self):
-        name = uuidutils.generate_uuid()
-        weight, uri = self.doc['weight'], self.doc['uri']
-        with pool(self, name, weight, uri, group='my-group'):
-            self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_put_raises_if_missing_fields(self):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        self.simulate_put(path, body=jsonutils.dumps({'weight': 100}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_put(path,
-                          body=jsonutils.dumps(
-                              {'uri': self.mongodb_url}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 2**32+1, 'big')
-    def test_put_raises_if_invalid_weight(self, weight):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        doc = {'weight': weight, 'uri': 'a'}
-        self.simulate_put(path,
-                          body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 2**32+1, [], 'localhost:27017')
-    def test_put_raises_if_invalid_uri(self, uri):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        self.simulate_put(path,
-                          body=jsonutils.dumps({'weight': 1, 'uri': uri}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 'wee', [])
-    def test_put_raises_if_invalid_options(self, options):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        doc = {'weight': 1, 'uri': 'a', 'options': options}
-        self.simulate_put(path, body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_put_existing_overwrites(self):
-        # NOTE(cabrera): setUp creates default pool
-        expect = self.doc
-        self.simulate_put(self.pool,
-                          body=jsonutils.dumps(expect))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        result = self.simulate_get(self.pool)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        doc = jsonutils.loads(result[0])
-        self.assertEqual(expect['weight'], doc['weight'])
-        self.assertEqual(expect['uri'], doc['uri'])
-
-    def test_put_capabilities_mismatch_pool(self):
-        mongodb_doc = self.doc
-        self.simulate_put(self.pool,
-                          body=jsonutils.dumps(mongodb_doc))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        redis_doc = {'weight': 100,
-                     'group': 'mygroup',
-                     'uri': 'redis://127.0.0.1:6379'}
-
-        self.simulate_put(self.pool,
-                          body=jsonutils.dumps(redis_doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_delete_works(self):
-        self.simulate_delete(self.pool)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_get(self.pool)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_get_nonexisting_raises_404(self):
-        self.simulate_get(self.url_prefix + '/pools/nonexisting')
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def _pool_expect(self, pool, xhref, xweight, xuri):
-        self.assertIn('href', pool)
-        self.assertIn('name', pool)
-        self.assertEqual(xhref, pool['href'])
-        self.assertIn('weight', pool)
-        self.assertEqual(xweight, pool['weight'])
-        self.assertIn('uri', pool)
-
-        # NOTE(dynarro): we are using startwith because we are adding to
-        # pools UUIDs, to avoid dupplications
-        self.assertTrue(pool['uri'].startswith(xuri))
-
-    def test_get_works(self):
-        result = self.simulate_get(self.pool)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, self.doc['weight'],
-                          self.doc['uri'])
-
-    def test_detailed_get_works(self):
-        result = self.simulate_get(self.pool,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, self.doc['weight'],
-                          self.doc['uri'])
-        self.assertIn('options', pool)
-        self.assertEqual({}, pool['options'])
-
-    def test_patch_raises_if_missing_fields(self):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'location': 1}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def _patch_test(self, doc):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        result = self.simulate_get(self.pool,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, doc['weight'],
-                          doc['uri'])
-        self.assertEqual(doc['options'], pool['options'])
-
-    def test_patch_works(self):
-        doc = {'weight': 101,
-               'uri': self.mongodb_url,
-               'options': {'a': 1}}
-        self._patch_test(doc)
-
-    def test_patch_works_with_extra_fields(self):
-        doc = {'weight': 101,
-               'uri': self.mongodb_url,
-               'options': {'a': 1},
-               'location': 100, 'partition': 'taco'}
-        self._patch_test(doc)
-
-    @ddt.data(-1, 2**32+1, 'big')
-    def test_patch_raises_400_on_invalid_weight(self, weight):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'weight': weight}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 2**32+1, [], 'localhost:27017')
-    def test_patch_raises_400_on_invalid_uri(self, uri):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'uri': uri}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 'wee', [])
-    def test_patch_raises_400_on_invalid_options(self, options):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'options': options}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_patch_raises_404_if_pool_not_found(self):
-        self.simulate_patch(self.url_prefix + '/pools/notexists',
-                            body=jsonutils.dumps({'weight': 1}))
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_empty_listing(self):
-        self.simulate_delete(self.pool)
-        result = self.simulate_get(self.url_prefix + '/pools')
-        results = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self.assertEqual(0, len(results['pools']))
-        self.assertIn('links', results)
-
-    def _listing_test(self, count=10, limit=10,
-                      marker=None, detailed=False):
-        # NOTE(cpp-cabrera): delete initial pool - it will interfere
-        # with listing tests
-        self.simulate_delete(self.pool)
-        query = 'limit={0}&detailed={1}'.format(limit, detailed)
-        if marker:
-            query += '&marker={0}'.format(marker)
-
-        with pools(self, count, self.doc['uri'], 'my-group') as expected:
-            result = self.simulate_get(self.url_prefix + '/pools',
-                                       query_string=query)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            results = jsonutils.loads(result[0])
-            self.assertIsInstance(results, dict)
-            self.assertIn('pools', results)
-            self.assertIn('links', results)
-            pool_list = results['pools']
-
-            link = results['links'][0]
-            self.assertEqual('next', link['rel'])
-            href = falcon.uri.parse_query_string(link['href'].split('?')[1])
-            self.assertIn('marker', href)
-            self.assertEqual(str(limit), href['limit'])
-            self.assertEqual(str(detailed).lower(), href['detailed'])
-
-            next_query_string = ('marker={marker}&limit={limit}'
-                                 '&detailed={detailed}').format(**href)
-            next_result = self.simulate_get(link['href'].split('?')[0],
-                                            query_string=next_query_string)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-            next_pool = jsonutils.loads(next_result[0])
-            next_pool_list = next_pool['pools']
-
-            self.assertIn('links', next_pool)
-            if limit < count:
-                self.assertEqual(min(limit, count-limit),
-                                 len(next_pool_list))
-            else:
-                # NOTE(jeffrey4l): when limit >= count, there will be no
-                # pools in the 2nd page.
-                self.assertEqual(0, len(next_pool_list))
-
-            self.assertEqual(min(limit, count), len(pool_list))
-            for s in pool_list + next_pool_list:
-                # NOTE(flwang): It can't assumed that both sqlalchemy and
-                # mongodb can return query result with the same order. Just
-                # like the order they're inserted. Actually, sqlalchemy can't
-                # guarantee that. So we're leveraging the relationship between
-                # pool weight and the index of pools fixture to get the
-                # right pool to verify.
-                expect = expected[s['weight']]
-                path, weight, group = expect[:3]
-                self._pool_expect(s, path, weight, self.doc['uri'])
-                if detailed:
-                    self.assertIn('options', s)
-                    self.assertEqual(s['options'], expect[-1])
-                else:
-                    self.assertNotIn('options', s)
-
-    def test_listing_works(self):
-        self._listing_test()
-
-    def test_detailed_listing_works(self):
-        self._listing_test(detailed=True)
-
-    @ddt.data(1, 5, 10, 15)
-    def test_listing_works_with_limit(self, limit):
-        self._listing_test(count=15, limit=limit)
-
-    def test_listing_marker_is_respected(self):
-        self.simulate_delete(self.pool)
-
-        with pools(self, 10, self.doc['uri'], 'my-group') as expected:
-            result = self.simulate_get(self.url_prefix + '/pools',
-                                       query_string='marker=3')
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            pool_list = jsonutils.loads(result[0])['pools']
-            self.assertEqual(6, len(pool_list))
-            path, weight = expected[4][:2]
-            self._pool_expect(pool_list[0], path, weight, self.doc['uri'])
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_queue_lifecycle.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_queue_lifecycle.py
deleted file mode 100644
index 8c0084b7..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_queue_lifecycle.py
+++ /dev/null
@@ -1,391 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import ddt
-import falcon
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-import six
-
-from zaqar.storage import errors as storage_errors
-from zaqar import tests as testing
-from zaqar.tests.unit.transport.wsgi import base
-
-
-@ddt.ddt
-class TestQueueLifecycleMongoDB(base.V1_1Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestQueueLifecycleMongoDB, self).setUp()
-
-        self.queue_path = self.url_prefix + '/queues'
-        self.gumshoe_queue_path = self.queue_path + '/gumshoe'
-        self.fizbat_queue_path = self.queue_path + '/fizbat'
-
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': '3387309841abc_'
-        }
-
-    def tearDown(self):
-        storage = self.boot.storage._storage
-        connection = storage.connection
-
-        connection.drop_database(self.boot.control.queues_database)
-
-        for db in storage.message_databases:
-            connection.drop_database(db)
-
-        super(TestQueueLifecycleMongoDB, self).tearDown()
-
-    def test_empty_project_id(self):
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': ''
-        }
-
-        self.simulate_put(self.gumshoe_queue_path, headers=headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_delete(self.gumshoe_queue_path, headers=headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data('480924', 'foo')
-    def test_basics_thoroughly(self, project_id):
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': project_id
-        }
-        gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats'
-
-        # Stats are empty - queue not created yet
-        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Create
-        doc = '{"messages": {"ttl": 600}}'
-        self.simulate_put(self.gumshoe_queue_path,
-                          headers=headers, body=doc)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        location = self.srmock.headers_dict['Location']
-        self.assertEqual(self.gumshoe_queue_path, location)
-
-        # Fetch metadata
-        result = self.simulate_get(self.gumshoe_queue_path,
-                                   headers=headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self.assertEqual(jsonutils.loads(doc), result_doc)
-
-        # Stats empty queue
-        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Delete
-        self.simulate_delete(self.gumshoe_queue_path, headers=headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        # Get non-existent stats
-        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_name_restrictions(self):
-        self.simulate_put(self.queue_path + '/Nice-Boat_2',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        self.simulate_put(self.queue_path + '/Nice-Bo@t',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_put(self.queue_path + '/_' + 'niceboat' * 8,
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_project_id_restriction(self):
-        muvluv_queue_path = self.queue_path + '/Muv-Luv'
-
-        self.simulate_put(muvluv_queue_path,
-                          headers={'Client-ID': uuidutils.generate_uuid(),
-                                   'X-Project-ID': 'JAM Project' * 24})
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # no charset restrictions
-        self.simulate_put(muvluv_queue_path,
-                          headers={'Client-ID': uuidutils.generate_uuid(),
-                                   'X-Project-ID': 'JAM Project'})
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_non_ascii_name(self):
-        test_params = ((u'/queues/non-ascii-n\u0153me', 'utf-8'),
-                       (u'/queues/non-ascii-n\xc4me', 'iso8859-1'))
-
-        for uri, enc in test_params:
-            uri = self.url_prefix + uri
-
-            if six.PY2:
-                uri = uri.encode(enc)
-
-            self.simulate_put(uri, headers=self.headers)
-            self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-            self.simulate_delete(uri, headers=self.headers)
-            self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_no_metadata(self):
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        self.simulate_put(self.fizbat_queue_path, body='',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    @ddt.data('{', '[]', '.', ' ')
-    def test_bad_metadata(self, document):
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers,
-                          body=document)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_too_much_metadata(self):
-        self.simulate_put(self.fizbat_queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
-
-        max_size = self.transport_cfg.max_queue_metadata
-        padding_len = max_size - (len(doc) - 10) + 1
-
-        doc = doc.format(pad='x' * padding_len)
-
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers,
-                          body=doc)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_way_too_much_metadata(self):
-        self.simulate_put(self.fizbat_queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
-
-        max_size = self.transport_cfg.max_queue_metadata
-        padding_len = max_size * 100
-
-        doc = doc.format(pad='x' * padding_len)
-
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers, body=doc)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_custom_metadata(self):
-        # Set
-        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
-
-        max_size = self.transport_cfg.max_queue_metadata
-        padding_len = max_size - (len(doc) - 2)
-
-        doc = doc.format(pad='x' * padding_len)
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers,
-                          body=doc)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Get
-        result = self.simulate_get(self.fizbat_queue_path,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(jsonutils.loads(doc), result_doc)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_update_metadata(self):
-        self.skip("This should use patch instead")
-        xyz_queue_path = self.url_prefix + '/queues/xyz'
-        xyz_queue_path_metadata = xyz_queue_path
-
-        # Create
-        self.simulate_put(xyz_queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Set meta
-        doc1 = '{"messages": {"ttl": 600}}'
-        self.simulate_put(xyz_queue_path_metadata,
-                          headers=self.headers,
-                          body=doc1)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        # Update
-        doc2 = '{"messages": {"ttl": 100}}'
-        self.simulate_put(xyz_queue_path_metadata,
-                          headers=self.headers,
-                          body=doc2)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        # Get
-        result = self.simulate_get(xyz_queue_path_metadata,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-
-        self.assertEqual(jsonutils.loads(doc2), result_doc)
-
-    def test_list(self):
-        arbitrary_number = 644079696574693
-        project_id = str(arbitrary_number)
-        client_id = uuidutils.generate_uuid()
-        header = {
-            'X-Project-ID': project_id,
-            'Client-ID': client_id
-        }
-
-        # NOTE(kgriffs): It's important that this one sort after the one
-        # above. This is in order to prove that bug/1236605 is fixed, and
-        # stays fixed!
-        alt_project_id = str(arbitrary_number + 1)
-
-        # List empty
-        result = self.simulate_get(self.queue_path, headers=header)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        results = jsonutils.loads(result[0])
-        self.assertEqual([], results['queues'])
-        self.assertIn('links', results)
-        self.assertEqual(0, len(results['links']))
-
-        # Payload exceeded
-        self.simulate_get(self.queue_path, headers=header,
-                          query_string='limit=21')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Create some
-        def create_queue(name, project_id, body):
-            altheader = {'Client-ID': client_id}
-            if project_id is not None:
-                altheader['X-Project-ID'] = project_id
-            uri = self.queue_path + '/' + name
-            self.simulate_put(uri, headers=altheader, body=body)
-
-        create_queue('q1', project_id, '{"node": 31}')
-        create_queue('q2', project_id, '{"node": 32}')
-        create_queue('q3', project_id, '{"node": 33}')
-
-        create_queue('q3', alt_project_id, '{"alt": 1}')
-
-        # List (limit)
-        result = self.simulate_get(self.queue_path, headers=header,
-                                   query_string='limit=2')
-
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(2, len(result_doc['queues']))
-
-        # List (no metadata, get all)
-        result = self.simulate_get(self.queue_path,
-                                   headers=header, query_string='limit=5')
-
-        result_doc = jsonutils.loads(result[0])
-        [target, params] = result_doc['links'][0]['href'].split('?')
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Ensure we didn't pick up the queue from the alt project.
-        queues = result_doc['queues']
-        self.assertEqual(3, len(queues))
-
-        # List with metadata
-        result = self.simulate_get(self.queue_path, headers=header,
-                                   query_string='detailed=true')
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        result_doc = jsonutils.loads(result[0])
-        [target, params] = result_doc['links'][0]['href'].split('?')
-
-        queue = result_doc['queues'][0]
-        result = self.simulate_get(queue['href'], headers=header)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(queue['metadata'], result_doc)
-        self.assertEqual({'node': 31}, result_doc)
-
-        # List tail
-        self.simulate_get(target, headers=header, query_string=params)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # List manually-constructed tail
-        self.simulate_get(target, headers=header, query_string='marker=zzz')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_list_returns_503_on_nopoolfound_exception(self):
-        arbitrary_number = 644079696574693
-        project_id = str(arbitrary_number)
-        client_id = uuidutils.generate_uuid()
-        header = {
-            'X-Project-ID': project_id,
-            'Client-ID': client_id
-        }
-
-        queue_controller = self.boot.storage.queue_controller
-
-        with mock.patch.object(queue_controller, 'list') as mock_queue_list:
-
-            def queue_generator():
-                raise storage_errors.NoPoolFound()
-
-            # This generator tries to be like queue controller list generator
-            # in some ways.
-            def fake_generator():
-                yield queue_generator()
-                yield {}
-            mock_queue_list.return_value = fake_generator()
-            self.simulate_get(self.queue_path, headers=header)
-            self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-
-class TestQueueLifecycleFaultyDriver(base.V1_1BaseFaulty):
-
-    config_file = 'wsgi_faulty.conf'
-
-    def test_simple(self):
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': '338730984abc_1'
-        }
-
-        gumshoe_queue_path = self.url_prefix + '/queues/gumshoe'
-        doc = '{"messages": {"ttl": 600}}'
-        self.simulate_put(gumshoe_queue_path,
-                          headers=self.headers,
-                          body=doc)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        location = ('Location', gumshoe_queue_path)
-        self.assertNotIn(location, self.srmock.headers)
-
-        result = self.simulate_get(gumshoe_queue_path,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-        self.assertNotEqual(result_doc, jsonutils.loads(doc))
-
-        self.simulate_get(gumshoe_queue_path + '/stats',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_get(self.url_prefix + '/queues',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_delete(gumshoe_queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v1_1/test_validation.py b/zaqar/tests/unit/transport/wsgi/v1_1/test_validation.py
deleted file mode 100644
index 5f188e99..00000000
--- a/zaqar/tests/unit/transport/wsgi/v1_1/test_validation.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import json - -import falcon - -from oslo_utils import uuidutils -from zaqar.tests.unit.transport.wsgi import base - - -class TestValidation(base.V1_1Base): - - config_file = 'wsgi_mongodb_validation.conf' - - def setUp(self): - super(TestValidation, self).setUp() - - self.project_id = '7e55e1a7e' - - self.queue_path = self.url_prefix + '/queues/noein' - self.simulate_put(self.queue_path, self.project_id) - - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - } - - def tearDown(self): - self.simulate_delete(self.queue_path, self.project_id) - super(TestValidation, self).tearDown() - - def test_metadata_deserialization(self): - # Normal case - self.simulate_put(self.queue_path, - self.project_id, - body='{"timespace": "Shangri-la"}') - - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Too long - max_queue_metadata = 64 - - doc_tmpl = '{{"Dragon Torc":"{0}"}}' - doc_tmpl_ws = '{{ "Dragon Torc" : "{0}" }}' # with whitespace - envelope_length = len(doc_tmpl.format('')) - - for tmpl in doc_tmpl, doc_tmpl_ws: - gen = '0' * (max_queue_metadata - envelope_length + 1) - doc = tmpl.format(gen) - self.simulate_put(self.queue_path, - self.project_id, - body=doc) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_message_deserialization(self): - # Normal case - body = '{"messages": [{"body": "Dragon Knights", "ttl": 100}]}' - self.simulate_post(self.queue_path + '/messages', - self.project_id, body=body, - headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Both messages' size are too long - max_messages_post_size = 256 - - obj = {'a': 0, 'b': ''} - envelope_length = len(json.dumps(obj, separators=(',', ':'))) - obj['b'] = 'x' * (max_messages_post_size - envelope_length + 1) - - for long_body in ('a' * (max_messages_post_size - 2 + 1), obj): - doc = json.dumps([{'body': long_body, 'ttl': 100}]) - self.simulate_post(self.queue_path + '/messages', - self.project_id, - body=doc, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_request_without_client_id(self): - # No Client-ID in headers, it will raise 400 error. 
- empty_headers = {} - self.simulate_put(self.queue_path, - self.project_id, - headers=empty_headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_queue_metadata_putting(self): - # Test _default_message_ttl - # TTL normal case - queue_1 = self.url_prefix + '/queues/queue1' - self.simulate_put(queue_1, - self.project_id, - body='{"_default_message_ttl": 60}') - self.addCleanup(self.simulate_delete, queue_1, self.project_id, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # TTL under min - self.simulate_put(queue_1, - self.project_id, - body='{"_default_message_ttl": 59}') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # TTL over max - self.simulate_put(queue_1, - self.project_id, - body='{"_default_message_ttl": 1209601}') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Test _max_messages_post_size - # Size normal case - queue_2 = self.url_prefix + '/queues/queue2' - self.simulate_put(queue_2, - self.project_id, - body='{"_max_messages_post_size": 255}') - self.addCleanup(self.simulate_delete, queue_2, self.project_id, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Size over max - self.simulate_put(queue_2, - self.project_id, - body='{"_max_messages_post_size": 257}') - self.assertEqual(falcon.HTTP_400, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/__init__.py b/zaqar/tests/unit/transport/wsgi/v2_0/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_auth.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_auth.py deleted file mode 100644 index fd385d23..00000000 --- a/zaqar/tests/unit/transport/wsgi/v2_0/test_auth.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Test Auth.""" - - -import falcon -from falcon import testing -from keystonemiddleware import auth_token -from oslo_utils import uuidutils - -from zaqar.tests.unit.transport.wsgi import base - - -class TestAuth(base.V2Base): - - config_file = 'keystone_auth.conf' - - def setUp(self): - super(TestAuth, self).setUp() - self.headers = {'Client-ID': uuidutils.generate_uuid()} - - def test_auth_install(self): - self.assertIsInstance(self.app._auth_app, auth_token.AuthProtocol) - - def test_non_authenticated(self): - env = testing.create_environ(self.url_prefix + '/480924/queues/', - method='GET', - headers=self.headers) - - self.app(env, self.srmock) - self.assertEqual(falcon.HTTP_401, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_claims.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_claims.py deleted file mode 100644 index cb16c62d..00000000 --- a/zaqar/tests/unit/transport/wsgi/v2_0/test_claims.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import json - -import ddt -import falcon -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -from testtools import matchers - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@ddt.ddt -class TestClaimsMongoDB(base.V2Base): - - config_file = 'wsgi_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestClaimsMongoDB, self).setUp() - - self.default_claim_ttl = self.boot.transport._defaults.claim_ttl - self.project_id = '737_abc8332832' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - self.queue_path = self.url_prefix + '/queues/fizbit' - self.claims_path = self.queue_path + '/claims' - self.messages_path = self.queue_path + '/messages' - - doc = json.dumps({"_ttl": 60}) - - self.simulate_put(self.queue_path, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - doc = json.dumps({'messages': [{'body': 239, 'ttl': 300}] * 10}) - self.simulate_post(self.queue_path + '/messages', - body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def tearDown(self): - storage = self.boot.storage._storage - control = self.boot.control - connection = storage.connection - - connection.drop_database(control.queues_database) - - for db in storage.message_databases: - connection.drop_database(db) - - self.simulate_delete(self.queue_path, headers=self.headers) - - super(TestClaimsMongoDB, self).tearDown() - - @ddt.data('[', '[]', '.', '"fail"') - def test_bad_claim(self, doc): - self.simulate_post(self.claims_path, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - href = self._get_a_claim() - - self.simulate_patch(href, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_exceeded_claim(self): - self.simulate_post(self.claims_path, - body='{"ttl": 100, "grace": 60}', - query_string='limit=21', headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60)) - def test_unacceptable_ttl_or_grace(self, ttl_grace): - ttl, grace = ttl_grace - self.simulate_post(self.claims_path, - body=json.dumps({'ttl': ttl, 'grace': grace}), - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 59, 43201) - def test_unacceptable_new_ttl(self, ttl): - href = self._get_a_claim() - - self.simulate_patch(href, - body=json.dumps({'ttl': ttl}), - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_default_ttl_and_grace(self): - self.simulate_post(self.claims_path, - body='{}', headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - body = self.simulate_get(self.srmock.headers_dict['location'], - headers=self.headers) - - claim = jsonutils.loads(body[0]) - self.assertEqual(self.default_claim_ttl, claim['ttl']) - - def _get_a_claim(self): - doc = '{"ttl": 100, "grace": 
60}' - self.simulate_post(self.claims_path, body=doc, headers=self.headers) - return self.srmock.headers_dict['Location'] - - def test_lifecycle(self): - doc = '{"ttl": 100, "grace": 60}' - - # First, claim some messages - body = self.simulate_post(self.claims_path, body=doc, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - claimed = jsonutils.loads(body[0])['messages'] - claim_href = self.srmock.headers_dict['Location'] - message_href, params = claimed[0]['href'].split('?') - - # No more messages to claim - self.simulate_post(self.claims_path, body=doc, - query_string='limit=3', headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Listing messages, by default, won't include claimed, will echo - body = self.simulate_get(self.messages_path, - headers=self.headers, - query_string="echo=true") - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - # Listing messages, by default, won't include claimed, won't echo - body = self.simulate_get(self.messages_path, - headers=self.headers, - query_string="echo=false") - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - # List messages, include_claimed, but don't echo - body = self.simulate_get(self.messages_path, - query_string='include_claimed=true' - '&echo=false', - headers=self.headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - # List messages with a different client-id and echo=false. - # Should return some messages - headers = self.headers.copy() - headers["Client-ID"] = uuidutils.generate_uuid() - body = self.simulate_get(self.messages_path, - query_string='include_claimed=true' - '&echo=false', - headers=headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # Include claimed messages this time, and echo - body = self.simulate_get(self.messages_path, - query_string='include_claimed=true' - '&echo=true', - headers=self.headers) - listed = jsonutils.loads(body[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(len(claimed), len(listed['messages'])) - - now = timeutils.utcnow() + datetime.timedelta(seconds=10) - timeutils_utcnow = 'oslo_utils.timeutils.utcnow' - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - body = self.simulate_get(claim_href, headers=self.headers) - - claim = jsonutils.loads(body[0]) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(100, claim['ttl']) - # NOTE(cpp-cabrera): verify that claim age is non-negative - self.assertThat(claim['age'], matchers.GreaterThan(-1)) - - # Try to delete the message without submitting a claim_id - self.simulate_delete(message_href, headers=self.headers) - self.assertEqual(falcon.HTTP_403, self.srmock.status) - - # Delete the message and its associated claim - self.simulate_delete(message_href, - query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Try to get it from the wrong project - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': 'bogusproject' - } - self.simulate_get(message_href, query_string=params, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Get the message - self.simulate_get(message_href, query_string=params, - headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Update the claim - new_claim_ttl = '{"ttl": 60, "grace": 60}' - creation = 
timeutils.utcnow() - self.simulate_patch(claim_href, body=new_claim_ttl, - headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Get the claimed messages (again) - body = self.simulate_get(claim_href, headers=self.headers) - query = timeutils.utcnow() - claim = jsonutils.loads(body[0]) - message_href, params = claim['messages'][0]['href'].split('?') - - self.assertEqual(60, claim['ttl']) - estimated_age = timeutils.delta_seconds(creation, query) - self.assertGreater(estimated_age, claim['age']) - - # Delete the claim - self.simulate_delete(claim['href'], headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Try to delete a message with an invalid claim ID - self.simulate_delete(message_href, - query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Make sure it wasn't deleted! - self.simulate_get(message_href, query_string=params, - headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # Try to get a claim that doesn't exist - self.simulate_get(claim['href'], headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Try to update a claim that doesn't exist - self.simulate_patch(claim['href'], body=doc, - headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_post_claim_nonexistent_queue(self): - path = self.url_prefix + '/queues/nonexistent/claims' - self.simulate_post(path, - body='{"ttl": 100, "grace": 60}', - headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_get_claim_nonexistent_queue(self): - path = self.url_prefix + '/queues/nonexistent/claims/aaabbbba' - self.simulate_get(path, headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # NOTE(cpp-cabrera): regression test against bug #1203842 - def test_get_nonexistent_claim_404s(self): - self.simulate_get(self.claims_path + '/a', headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_delete_nonexistent_claim_204s(self): - self.simulate_delete(self.claims_path + '/a', - headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_patch_nonexistent_claim_404s(self): - patch_data = json.dumps({'ttl': 100}) - self.simulate_patch(self.claims_path + '/a', body=patch_data, - headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - -class TestClaimsFaultyDriver(base.V2BaseFaulty): - - config_file = 'wsgi_faulty.conf' - - def test_simple(self): - self.project_id = '480924abc_' - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - } - - claims_path = self.url_prefix + '/queues/fizbit/claims' - doc = '{"ttl": 100, "grace": 60}' - - self.simulate_post(claims_path, body=doc, headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_get(claims_path + '/nichts', headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_patch(claims_path + '/nichts', body=doc, - headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) - - self.simulate_delete(claims_path + '/foo', headers=self.headers) - self.assertEqual(falcon.HTTP_503, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_default_limits.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_default_limits.py deleted file mode 100644 index 228934fa..00000000 --- 
a/zaqar/tests/unit/transport/wsgi/v2_0/test_default_limits.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import contextlib - -import falcon -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from zaqar import storage -from zaqar.tests.unit.transport.wsgi import base - - -class TestDefaultLimits(base.V2Base): - - config_file = 'wsgi_mongodb_default_limits.conf' - - def setUp(self): - super(TestDefaultLimits, self).setUp() - - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': '%s_' % uuidutils.generate_uuid() - } - self.queue_path = self.url_prefix + '/queues' - self.q1_queue_path = self.queue_path + '/' + uuidutils.generate_uuid() - self.messages_path = self.q1_queue_path + '/messages' - self.claims_path = self.q1_queue_path + '/claims' - - self.simulate_put(self.q1_queue_path, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def tearDown(self): - self.simulate_delete(self.queue_path, headers=self.headers) - super(TestDefaultLimits, self).tearDown() - - def test_queue_listing(self): - # 2 queues to list - self.simulate_put(self.queue_path + '/q2', headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - with self._prepare_queues(storage.DEFAULT_QUEUES_PER_PAGE + 1): - result = self.simulate_get(self.queue_path, headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - queues = jsonutils.loads(result[0])['queues'] - self.assertEqual(storage.DEFAULT_QUEUES_PER_PAGE, len(queues)) - - def test_message_listing_different_id(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1) - - headers = self.headers.copy() - headers['Client-ID'] = uuidutils.generate_uuid() - result = self.simulate_get(self.messages_path, - headers=headers, - query_string='echo=false') - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - messages = jsonutils.loads(result[0])['messages'] - self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages)) - - def test_message_listing_same_id(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1) - result = self.simulate_get(self.messages_path, - headers=self.headers, - query_string='echo=false') - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(result) - - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1) - result = self.simulate_get(self.messages_path, - headers=self.headers, - query_string='echo=true') - - messages = jsonutils.loads(result[0])['messages'] - self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages)) - - def test_claim_creation(self): - self._prepare_messages(storage.DEFAULT_MESSAGES_PER_CLAIM + 1) - - result = self.simulate_post(self.claims_path, - body='{"ttl": 60, "grace": 60}', - headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - messages = jsonutils.loads(result[0])['messages'] - 
self.assertEqual(storage.DEFAULT_MESSAGES_PER_CLAIM, len(messages)) - - @contextlib.contextmanager - def _prepare_queues(self, count): - queue_paths = [self.queue_path + '/multi-{0}'.format(i) - for i in range(count)] - - for path in queue_paths: - self.simulate_put(path, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - yield - - for path in queue_paths: - self.simulate_delete(path, headers=self.headers) - - def _prepare_messages(self, count): - doc = {'messages': [{'body': 239, 'ttl': 300}] * count} - body = jsonutils.dumps(doc) - self.simulate_post(self.messages_path, body=body, - headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_flavors.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_flavors.py deleted file mode 100644 index 180ba78a..00000000 --- a/zaqar/tests/unit/transport/wsgi/v2_0/test_flavors.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import contextlib -import uuid - -import ddt -import falcon -from oslo_serialization import jsonutils - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@contextlib.contextmanager -def flavor(test, name, pool_group): - """A context manager for constructing a flavor for use in testing. - - Deletes the flavor after exiting the context. - - :param test: Must expose simulate_* methods - :param name: Name for this flavor - :type name: six.text_type - :type pool: six.text_type - :returns: (name, uri, capabilities) - :rtype: see above - - """ - - doc = {'pool_group': pool_group} - path = test.url_prefix + '/flavors/' + name - - test.simulate_put(path, body=jsonutils.dumps(doc)) - - try: - yield name, pool_group - - finally: - test.simulate_delete(path) - - -@contextlib.contextmanager -def flavors(test, count, pool_group): - """A context manager for constructing flavors for use in testing. - - Deletes the flavors after exiting the context. 
- - :param test: Must expose simulate_* methods - :param count: Number of pools to create - :type count: int - :returns: (paths, pool_group, capabilities) - :rtype: ([six.text_type], [six.text_type], [dict]) - - """ - - base = test.url_prefix + '/flavors/' - args = sorted([(base + str(i), str(i)) for i in range(count)], - key=lambda tup: tup[1]) - for path, _ in args: - doc = {'pool_group': pool_group} - test.simulate_put(path, body=jsonutils.dumps(doc)) - - try: - yield args - finally: - for path, _ in args: - test.simulate_delete(path) - - -@ddt.ddt -class TestFlavorsMongoDB(base.V2Base): - - config_file = 'wsgi_mongodb_pooled.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestFlavorsMongoDB, self).setUp() - self.queue = 'test-queue' - self.queue_path = self.url_prefix + '/queues/' + self.queue - - self.pool = 'mypool' - self.pool_group = 'mypool-group' - self.pool_path = self.url_prefix + '/pools/' + self.pool - self.pool_doc = {'weight': 100, - 'group': self.pool_group, - 'uri': self.mongodb_url + '/test'} - self.simulate_put(self.pool_path, body=jsonutils.dumps(self.pool_doc)) - - self.flavor = 'test-flavor' - self.doc = {'capabilities': {}, 'pool_group': self.pool_group} - self.flavor_path = self.url_prefix + '/flavors/' + self.flavor - self.simulate_put(self.flavor_path, body=jsonutils.dumps(self.doc)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def tearDown(self): - self.simulate_delete(self.queue_path) - self.simulate_delete(self.flavor_path) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - self.simulate_delete(self.pool_path) - - super(TestFlavorsMongoDB, self).tearDown() - - def test_put_flavor_works(self): - name = str(uuid.uuid1()) - with flavor(self, name, self.doc['pool_group']): - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def test_put_raises_if_missing_fields(self): - path = self.url_prefix + '/flavors/' + str(uuid.uuid1()) - self.simulate_put(path, body=jsonutils.dumps({})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_put(path, - body=jsonutils.dumps({'capabilities': {}})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(1, 2**32+1, []) - def test_put_raises_if_invalid_pool(self, pool_group): - path = self.url_prefix + '/flavors/' + str(uuid.uuid1()) - self.simulate_put(path, - body=jsonutils.dumps({'pool_group': pool_group})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_put_auto_get_capabilities(self): - path = self.url_prefix + '/flavors/' + str(uuid.uuid1()) - doc = {'pool_group': self.pool_group} - self.simulate_put(path, body=jsonutils.dumps(doc)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def test_put_existing_overwrites(self): - # NOTE(cabrera): setUp creates default flavor - expect = self.doc - self.simulate_put(self.flavor_path, - body=jsonutils.dumps(expect)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - result = self.simulate_get(self.flavor_path) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - doc = jsonutils.loads(result[0]) - self.assertEqual(expect['pool_group'], doc['pool_group']) - - def test_create_flavor_no_pool_group(self): - self.simulate_delete(self.flavor_path) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_delete(self.pool_path) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - resp = self.simulate_put(self.flavor_path, - body=jsonutils.dumps(self.doc)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - self.assertEqual( - 
{'description': 'Flavor test-flavor could not be created. ' - 'Pool group mypool-group does not exist', - 'title': 'Unable to create'}, - jsonutils.loads(resp[0])) - - def test_delete_works(self): - self.simulate_delete(self.flavor_path) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_get(self.flavor_path) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_get_nonexisting_raises_404(self): - self.simulate_get(self.url_prefix + '/flavors/nonexisting') - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def _flavor_expect(self, flavor, xhref, xpool_group): - self.assertIn('href', flavor) - self.assertIn('name', flavor) - self.assertEqual(xhref, flavor['href']) - self.assertIn('pool_group', flavor) - self.assertEqual(xpool_group, flavor['pool_group']) - - def test_get_works(self): - result = self.simulate_get(self.flavor_path) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - flavor = jsonutils.loads(result[0]) - self._flavor_expect(flavor, self.flavor_path, self.doc['pool_group']) - - store_caps = ['FIFO', 'CLAIMS', 'DURABILITY', - 'AOD', 'HIGH_THROUGHPUT'] - self.assertEqual(store_caps, flavor['capabilities']) - - def test_patch_raises_if_missing_fields(self): - self.simulate_patch(self.flavor_path, - body=jsonutils.dumps({'location': 1})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def _patch_test(self, doc): - result = self.simulate_patch(self.flavor_path, - body=jsonutils.dumps(doc)) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - updated_flavor = jsonutils.loads(result[0]) - self._flavor_expect(updated_flavor, self.flavor_path, - doc['pool_group']) - self.assertEqual(doc['capabilities'], updated_flavor['capabilities']) - - result = self.simulate_get(self.flavor_path) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - flavor = jsonutils.loads(result[0]) - self._flavor_expect(flavor, self.flavor_path, doc['pool_group']) - self.assertEqual(doc['capabilities'], flavor['capabilities']) - - def test_patch_works(self): - doc = {'pool_group': 'mypoolgroup', 'capabilities': []} - self._patch_test(doc) - - def test_patch_works_with_extra_fields(self): - doc = {'pool_group': 'mypoolgroup', 'capabilities': [], - 'location': 100, 'partition': 'taco'} - self._patch_test(doc) - - @ddt.data(-1, 2**32+1, []) - def test_patch_raises_400_on_invalid_pool_group(self, pool_group): - self.simulate_patch(self.flavor_path, - body=jsonutils.dumps({'pool_group': pool_group})) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 'wee', []) - def test_patch_raises_400_on_invalid_capabilities(self, capabilities): - doc = {'capabilities': capabilities} - self.simulate_patch(self.flavor_path, body=jsonutils.dumps(doc)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_patch_raises_404_if_flavor_not_found(self): - self.simulate_patch(self.url_prefix + '/flavors/notexists', - body=jsonutils.dumps({'pool_group': 'test'})) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_empty_listing(self): - self.simulate_delete(self.flavor_path) - result = self.simulate_get(self.url_prefix + '/flavors') - results = jsonutils.loads(result[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertEqual(0, len(results['flavors'])) - self.assertIn('links', results) - - def _listing_test(self, count=10, limit=10, - marker=None, detailed=False): - # NOTE(cpp-cabrera): delete initial flavor - it will interfere - # with listing tests - self.simulate_delete(self.flavor_path) - 
query = 'limit={0}&detailed={1}'.format(limit, detailed) - if marker: - query += '&marker={0}'.format(marker) - - with flavors(self, count, self.doc['pool_group']) as expected: - result = self.simulate_get(self.url_prefix + '/flavors', - query_string=query) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - results = jsonutils.loads(result[0]) - self.assertIsInstance(results, dict) - self.assertIn('flavors', results) - self.assertIn('links', results) - flavors_list = results['flavors'] - - link = results['links'][0] - self.assertEqual('next', link['rel']) - href = falcon.uri.parse_query_string(link['href'].split('?')[1]) - self.assertIn('marker', href) - self.assertEqual(str(limit), href['limit']) - self.assertEqual(str(detailed).lower(), href['detailed']) - - next_query_string = ('marker={marker}&limit={limit}' - '&detailed={detailed}').format(**href) - next_result = self.simulate_get(link['href'].split('?')[0], - query_string=next_query_string) - next_flavors = jsonutils.loads(next_result[0]) - next_flavors_list = next_flavors['flavors'] - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertIn('links', next_flavors) - if limit < count: - self.assertEqual(min(limit, count-limit), - len(next_flavors_list)) - else: - self.assertEqual(0, len(next_flavors_list)) - - self.assertEqual(min(limit, count), len(flavors_list)) - for i, s in enumerate(flavors_list + next_flavors_list): - expect = expected[i] - path = expect[0] - capabilities = ['FIFO', 'CLAIMS', 'DURABILITY', - 'AOD', 'HIGH_THROUGHPUT'] - self._flavor_expect(s, path, self.doc['pool_group']) - if detailed: - self.assertIn('capabilities', s) - self.assertEqual(s['capabilities'], capabilities) - else: - self.assertNotIn('capabilities', s) - - def test_listing_works(self): - self._listing_test() - - def test_detailed_listing_works(self): - self._listing_test(detailed=True) - - @ddt.data(1, 5, 10, 15) - def test_listing_works_with_limit(self, limit): - self._listing_test(count=15, limit=limit) - - def test_listing_marker_is_respected(self): - self.simulate_delete(self.flavor_path) - - with flavors(self, 10, self.doc['pool_group']) as expected: - result = self.simulate_get(self.url_prefix + '/flavors', - query_string='marker=3') - self.assertEqual(falcon.HTTP_200, self.srmock.status) - flavor_list = jsonutils.loads(result[0])['flavors'] - self.assertEqual(6, len(flavor_list)) - path, capabilities = expected[4][:2] - self._flavor_expect(flavor_list[0], path, self.doc['pool_group']) - - def test_listing_error_with_invalid_limit(self): - self.simulate_delete(self.flavor_path) - query = 'limit={0}&detailed={1}'.format(0, True) - - with flavors(self, 10, self.doc['pool_group']): - self.simulate_get(self.url_prefix + '/flavors', query_string=query) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_queue_create_works(self): - metadata = {'_flavor': self.flavor} - self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def test_queue_create_no_flavor(self): - metadata = {'_flavor': self.flavor} - - self.simulate_delete(self.flavor_path) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_health.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_health.py deleted file mode 100644 index 6716b2dc..00000000 --- 
a/zaqar/tests/unit/transport/wsgi/v2_0/test_health.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2014 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import ddt -import falcon -import mock -from oslo_serialization import jsonutils - -from zaqar.storage import errors -import zaqar.storage.mongodb as mongo -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base - - -@ddt.ddt -class TestHealthMongoDB(base.V2Base): - - config_file = 'wsgi_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestHealthMongoDB, self).setUp() - - def test_basic(self): - path = self.url_prefix + '/health' - body = self.simulate_get(path) - health = jsonutils.loads(body[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self.assertTrue(health['storage_reachable']) - self.assertIsNotNone(health['message_volume']) - for op in health['operation_status']: - self.assertTrue(health['operation_status'][op]['succeeded']) - - @mock.patch.object(mongo.driver.DataDriver, '_health') - def test_message_volume(self, mock_driver_get): - def _health(): - KPI = {} - KPI['message_volume'] = {'free': 1, 'claimed': 2, 'total': 3} - return KPI - - mock_driver_get.side_effect = _health - - path = self.url_prefix + '/health' - body = self.simulate_get(path) - health = jsonutils.loads(body[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - message_volume = health['message_volume'] - self.assertEqual(1, message_volume['free']) - self.assertEqual(2, message_volume['claimed']) - self.assertEqual(3, message_volume['total']) - - @mock.patch.object(mongo.messages.MessageController, 'delete') - def test_operation_status(self, mock_messages_delete): - mock_messages_delete.side_effect = errors.NotPermitted() - - path = self.url_prefix + '/health' - body = self.simulate_get(path) - health = jsonutils.loads(body[0]) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - op_status = health['operation_status'] - for op in op_status.keys(): - if op == 'delete_messages': - self.assertFalse(op_status[op]['succeeded']) - self.assertIsNotNone(op_status[op]['ref']) - else: - self.assertTrue(op_status[op]['succeeded']) - - -class TestHealthFaultyDriver(base.V2BaseFaulty): - - config_file = 'wsgi_faulty.conf' - - def test_simple(self): - path = self.url_prefix + '/health' - self.simulate_get(path) - self.assertEqual(falcon.HTTP_503, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_home.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_home.py deleted file mode 100644 index c9b5213d..00000000 --- a/zaqar/tests/unit/transport/wsgi/v2_0/test_home.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - - -import falcon -from oslo_serialization import jsonutils -from oslo_utils import uuidutils -import six.moves.urllib.parse as urlparse - -from zaqar.tests.unit.transport.wsgi import base - - -class TestHomeDocument(base.V2Base): - - config_file = 'wsgi_mongodb.conf' - - def test_json_response(self): - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': '8383830383abc_' - } - body = self.simulate_get(self.url_prefix, headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - content_type = self.srmock.headers_dict['Content-Type'] - self.assertEqual('application/json-home', content_type) - - try: - jsonutils.loads(body[0]) - except ValueError: - self.fail('Home document is not valid JSON') - - def test_href_template(self): - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': '8383830383' - } - body = self.simulate_get(self.url_prefix, headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - resp = jsonutils.loads(body[0]) - queue_href_template = resp['resources']['rel/queue']['href-template'] - path_1 = 'https://zaqar.example.com' + self.url_prefix - path_2 = 'https://zaqar.example.com' + self.url_prefix + '/' - - # Verify all the href template start with the correct version prefix - def get_href_or_template(resource): - return resource.get('href-template', '') or resource['href'] - - for resource in list(resp['resources']): - self.assertTrue( - get_href_or_template(resp['resources'][resource]). - startswith(self.url_prefix)) - - url = urlparse.urljoin(path_1, queue_href_template) - expected = ('https://zaqar.example.com' + self.url_prefix + - '/queues/foo') - self.assertEqual(expected, url.format(queue_name='foo')) - - url = urlparse.urljoin(path_2, queue_href_template) - self.assertEqual(expected, url.format(queue_name='foo')) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_media_type.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_media_type.py deleted file mode 100644 index 6d062da0..00000000 --- a/zaqar/tests/unit/transport/wsgi/v2_0/test_media_type.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
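
The home-document checks deleted above hinge on one detail: the rel/queue href-template must resolve identically whether or not the base path carries a trailing slash. A standalone sketch of that resolution, using Python 3's urllib.parse in place of the six shim, with an assumed home document rather than Zaqar's actual response:

    from urllib.parse import urljoin

    # Assumed shape of a JSON home document; only the piece the test needs.
    resp = {
        'resources': {
            'rel/queue': {'href-template': '/v2/queues/{queue_name}'},
        },
    }

    template = resp['resources']['rel/queue']['href-template']
    expected = 'https://zaqar.example.com/v2/queues/foo'

    # An absolute-path template resolves the same against both base forms.
    for base in ('https://zaqar.example.com/v2',
                 'https://zaqar.example.com/v2/'):
        url = urljoin(base, template)
        assert url.format(queue_name='foo') == expected
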
- - -import falcon -from falcon import testing -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from zaqar.tests.unit.transport.wsgi import base - - -class TestMediaType(base.V2Base): - - config_file = 'wsgi_mongodb.conf' - - def test_json_only_endpoints_with_wrong_accept_header(self): - endpoints = ( - ('GET', self.url_prefix + '/queues'), - ('GET', self.url_prefix + '/queues/nonexistent/stats'), - ('POST', self.url_prefix + '/queues/nonexistent/messages'), - ('GET', self.url_prefix + '/queues/nonexistent/messages/deadbeaf'), - ('POST', self.url_prefix + '/queues/nonexistent/claims'), - ('GET', self.url_prefix + '/queues/nonexistent/claims/0ad'), - ('GET', self.url_prefix + '/health'), - ) - - for method, endpoint in endpoints: - headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'Accept': 'application/xml', - } - - env = testing.create_environ(endpoint, - method=method, - headers=headers) - - self.app(env, self.srmock) - self.assertEqual(falcon.HTTP_406, self.srmock.status) - - def test_request_with_body_and_urlencoded_contenttype_header_fails(self): - # NOTE(Eva-i): this test case makes sure wsgi 'before' hook - # "require_content_type_be_non_urlencoded" works to prevent - # bug/1547100. - eww_queue_path = self.url_prefix + '/queues/eww' - eww_queue_messages_path = eww_queue_path + '/messages' - sample_message = jsonutils.dumps({'messages': [{'body': {'eww!'}, - 'ttl': 200}]}) - bad_headers = { - 'Client-ID': uuidutils.generate_uuid(), - 'Content-Type': 'application/x-www-form-urlencoded', - } - - # Create queue request with bad headers. Should still work, because it - # has no body. - self.simulate_put(eww_queue_path, headers=bad_headers) - self.addCleanup(self.simulate_delete, eww_queue_path, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Post message request with good headers. Should work. - self.simulate_post(eww_queue_messages_path, body=sample_message, - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Post message request with bad headers. Should not work. - self.simulate_post(eww_queue_messages_path, body=sample_message, - headers=bad_headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_messages.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_messages.py deleted file mode 100644 index 59af85a5..00000000 --- a/zaqar/tests/unit/transport/wsgi/v2_0/test_messages.py +++ /dev/null @@ -1,685 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
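
The media-type tests deleted above guard against bug/1547100 by rejecting bodies sent with an application/x-www-form-urlencoded content type. The actual hook lives in Zaqar's transport code; the sketch below is only a hypothetical distillation of the rule the three removed assertions exercise:

    def reject_urlencoded_body(content_type, body):
        # True means the request should fail with 400 Bad Request.
        is_urlencoded = (content_type or '').startswith(
            'application/x-www-form-urlencoded')
        return bool(body) and is_urlencoded

    # Queue PUT with the bad header but no body: allowed (201 in the test).
    assert not reject_urlencoded_body(
        'application/x-www-form-urlencoded', b'')

    # Message POST with good headers and a body: allowed (201 in the test).
    assert not reject_urlencoded_body('application/json', b'{"messages":[]}')

    # Message POST with the bad header and a body: rejected (400 in the test).
    assert reject_urlencoded_body('application/x-www-form-urlencoded', b'a=1')
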
- -import datetime - -import ddt -import falcon -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from testtools import matchers - -from zaqar import tests as testing -from zaqar.tests.unit.transport.wsgi import base -from zaqar.transport import validation - - -@ddt.ddt -class TestMessagesMongoDB(base.V2Base): - config_file = 'wsgi_mongodb.conf' - - @testing.requires_mongodb - def setUp(self): - super(TestMessagesMongoDB, self).setUp() - - self.default_message_ttl = self.boot.transport._defaults.message_ttl - - if self.conf.pooling: - uri = self.mongodb_url - for i in range(4): - db_name = "zaqar_test_pools_" + str(i) - # NOTE(dynarro): we need to create a unique uri. - uri = "%s/%s" % (uri, db_name) - options = {'database': db_name} - doc = {'weight': 100, 'uri': uri, 'options': options} - self.simulate_put(self.url_prefix + '/pools/' + str(i), - body=jsonutils.dumps(doc)) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - self.project_id = '7e55e1a7e' - self.headers.update({ - 'Client-ID': uuidutils.generate_uuid(), - 'X-Project-ID': self.project_id - }) - - # TODO(kgriffs): Add support in self.simulate_* for a "base path" - # so that we don't have to concatenate against self.url_prefix - # all over the place. - self.queue_path = self.url_prefix + '/queues/fizbit' - self.messages_path = self.queue_path + '/messages' - - doc = '{"_ttl": 60}' - self.simulate_put(self.queue_path, body=doc, headers=self.headers) - - def tearDown(self): - self.simulate_delete(self.queue_path, headers=self.headers) - if self.conf.pooling: - for i in range(4): - self.simulate_delete(self.url_prefix + '/pools/' + str(i), - headers=self.headers) - - super(TestMessagesMongoDB, self).tearDown() - - def test_name_restrictions(self): - sample_messages = [ - {'body': {'key': 'value'}, 'ttl': 200}, - ] - messages_path = self.url_prefix + '/queues/%s/messages' - sample_doc = jsonutils.dumps({'messages': sample_messages}) - - self.simulate_post(messages_path % 'Nice-Boat_2', - body=sample_doc, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - self.simulate_post(messages_path % 'Nice-Bo@t', - body=sample_doc, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_post(messages_path % ('_niceboat' * 8), - body=sample_doc, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def _test_post(self, sample_messages): - sample_doc = jsonutils.dumps({'messages': sample_messages}) - - result = self.simulate_post(self.messages_path, - body=sample_doc, headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - result_doc = jsonutils.loads(result[0]) - - msg_ids = self._get_msg_ids(self.srmock.headers_dict) - self.assertEqual(len(sample_messages), len(msg_ids)) - - expected_resources = [six.text_type(self.messages_path + '/' + id) - for id in msg_ids] - self.assertEqual(expected_resources, result_doc['resources']) - - # NOTE(kgriffs): As of v1.1, "partial" is no longer given - # in the response document. 
- self.assertNotIn('partial', result_doc) - - self.assertEqual(len(sample_messages), len(msg_ids)) - - lookup = dict([(m['ttl'], m['body']) for m in sample_messages]) - - # Test GET on the message resource directly - # NOTE(cpp-cabrera): force the passing of time to age a message - timeutils_utcnow = 'oslo_utils.timeutils.utcnow' - now = timeutils.utcnow() + datetime.timedelta(seconds=10) - with mock.patch(timeutils_utcnow) as mock_utcnow: - mock_utcnow.return_value = now - for msg_id in msg_ids: - message_uri = self.messages_path + '/' + msg_id - - headers = self.headers.copy() - headers['X-Project-ID'] = '777777' - # Wrong project ID - self.simulate_get(message_uri, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Correct project ID - result = self.simulate_get(message_uri, headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # Check message properties - message = jsonutils.loads(result[0]) - self.assertEqual(message_uri, message['href']) - self.assertEqual(lookup[message['ttl']], message['body']) - self.assertEqual(msg_id, message['id']) - - # no negative age - # NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26 - self.assertThat(message['age'], - matchers.GreaterThan(-1)) - - # Test bulk GET - query_string = 'ids=' + ','.join(msg_ids) - result = self.simulate_get(self.messages_path, - query_string=query_string, - headers=self.headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - result_doc = jsonutils.loads(result[0]) - expected_ttls = set(m['ttl'] for m in sample_messages) - actual_ttls = set(m['ttl'] for m in result_doc['messages']) - self.assertFalse(expected_ttls - actual_ttls) - actual_ids = set(m['id'] for m in result_doc['messages']) - self.assertFalse(set(msg_ids) - actual_ids) - - def test_exceeded_payloads(self): - # Get a valid message id - self._post_messages(self.messages_path) - msg_id = self._get_msg_id(self.srmock.headers_dict) - - # Bulk GET restriction - query_string = 'ids=' + ','.join([msg_id] * 21) - self.simulate_get(self.messages_path, - query_string=query_string, headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Listing restriction - self.simulate_get(self.messages_path, - query_string='limit=21', - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - # Bulk deletion restriction - query_string = 'ids=' + ','.join([msg_id] * 22) - self.simulate_delete(self.messages_path, - query_string=query_string, headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_post_single(self): - sample_messages = [ - {'body': {'key': 'value'}, 'ttl': 200}, - ] - - self._test_post(sample_messages) - - def test_post_multiple(self): - sample_messages = [ - {'body': 239, 'ttl': 100}, - {'body': {'key': 'value'}, 'ttl': 200}, - {'body': [1, 3], 'ttl': 300}, - ] - - self._test_post(sample_messages) - - def test_post_optional_ttl(self): - sample_messages = { - 'messages': [ - {'body': 239}, - {'body': {'key': 'value'}, 'ttl': 200}, - ], - } - - # Manually check default TTL is max from config - - sample_doc = jsonutils.dumps(sample_messages) - result = self.simulate_post(self.messages_path, - body=sample_doc, headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - result_doc = jsonutils.loads(result[0]) - - href = result_doc['resources'][0] - result = self.simulate_get(href, headers=self.headers) - message = jsonutils.loads(result[0]) - - self.assertEqual(self.default_message_ttl, 
message['ttl']) - - def test_post_to_non_ascii_queue(self): - # NOTE(kgriffs): This test verifies that routes with - # embedded queue name params go through the validation - # hook, regardless of the target resource. - - path = self.url_prefix + u'/queues/non-ascii-n\u0153me/messages' - - if six.PY2: - path = path.encode('utf-8') - - self._post_messages(path) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_post_with_long_queue_name(self): - # NOTE(kgriffs): This test verifies that routes with - # embedded queue name params go through the validation - # hook, regardless of the target resource. - - queues_path = self.url_prefix + '/queues/' - - game_title = 'v' * validation.QUEUE_NAME_MAX_LEN - self.addCleanup( - self.simulate_delete, queues_path + game_title, - headers=self.headers) - self._post_messages(queues_path + game_title + '/messages') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - game_title += 'v' - self._post_messages(queues_path + game_title + '/messages') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_post_to_missing_queue(self): - self.addCleanup( - self.simulate_delete, self.url_prefix + '/queues/nonexistent', - headers=self.headers) - self._post_messages(self.url_prefix + '/queues/nonexistent/messages') - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - def test_post_using_queue_default_message_ttl(self): - queue_path = self.url_prefix + '/queues/test_queue1' - messages_path = queue_path + '/messages' - doc = '{"_default_message_ttl": 999}' - self.simulate_put(queue_path, body=doc, headers=self.headers) - self.addCleanup(self.simulate_delete, queue_path, headers=self.headers) - sample_messages = { - 'messages': [ - {'body': {'key': 'value'}}, - ], - } - - sample_doc = jsonutils.dumps(sample_messages) - result = self.simulate_post(messages_path, - body=sample_doc, headers=self.headers) - result_doc = jsonutils.loads(result[0]) - href = result_doc['resources'][0] - result = self.simulate_get(href, headers=self.headers) - message = jsonutils.loads(result[0]) - - self.assertEqual(999, message['ttl']) - - def test_post_using_queue_max_messages_post_size(self): - queue_path = self.url_prefix + '/queues/test_queue2' - messages_path = queue_path + '/messages' - doc = '{"_max_messages_post_size": 1023}' - self.simulate_put(queue_path, body=doc, headers=self.headers) - self.addCleanup(self.simulate_delete, queue_path, headers=self.headers) - sample_messages = { - 'messages': [ - {'body': {'key': 'a' * 1204}}, - ], - } - - sample_doc = jsonutils.dumps(sample_messages) - self.simulate_post(messages_path, - body=sample_doc, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_get_from_missing_queue(self): - body = self.simulate_get(self.url_prefix + - '/queues/nonexistent/messages', - headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - @ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369') - def test_bad_client_id(self, text_id): - self.simulate_post(self.queue_path + '/messages', - body='{"ttl": 60, "body": ""}', - headers={'Client-ID': text_id}) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_get(self.queue_path + '/messages', - query_string='limit=3&echo=true', - headers={'Client-ID': text_id}) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(None, '[', '[]', '{}', '.') - def test_post_bad_message(self, document): - self.simulate_post(self.queue_path + 
'/messages', - body=document, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data(-1, 59, 1209601) - def test_unacceptable_ttl(self, ttl): - doc = {'messages': [{'ttl': ttl, 'body': None}]} - - self.simulate_post(self.queue_path + '/messages', - body=jsonutils.dumps(doc), - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_exceeded_message_posting(self): - # Total (raw request) size - doc = {'messages': [{'body': "some body", 'ttl': 100}] * 20} - body = jsonutils.dumps(doc, indent=4) - - max_len = self.transport_cfg.max_messages_post_size - long_body = body + (' ' * (max_len - len(body) + 1)) - - self.simulate_post(self.queue_path + '/messages', - body=long_body, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - @ddt.data('{"overflow": 9223372036854775808}', - '{"underflow": -9223372036854775809}') - def test_unsupported_json(self, document): - self.simulate_post(self.queue_path + '/messages', - body=document, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_delete(self): - self._post_messages(self.messages_path) - msg_id = self._get_msg_id(self.srmock.headers_dict) - target = self.messages_path + '/' + msg_id - - self.simulate_get(target, headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - self.simulate_delete(target, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_get(target, headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Safe to delete non-existing ones - self.simulate_delete(target, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_bulk_delete(self): - path = self.queue_path + '/messages' - self._post_messages(path, repeat=5) - [target, params] = self.srmock.headers_dict['location'].split('?') - - # Deleting the whole collection is denied - self.simulate_delete(path, headers=self.headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_delete(target, query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_get(target, query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - # Safe to delete non-existing ones - self.simulate_delete(target, query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Even after the queue is gone - self.simulate_delete(self.queue_path, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - self.simulate_delete(target, query_string=params, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - def test_list(self): - path = self.queue_path + '/messages' - self._post_messages(path, repeat=10) - - query_string = 'limit=3&echo=true' - body = self.simulate_get(path, - query_string=query_string, - headers=self.headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - cnt = 0 - while jsonutils.loads(body[0])['messages'] != []: - contents = jsonutils.loads(body[0]) - [target, params] = contents['links'][0]['href'].split('?') - - for msg in contents['messages']: - self.simulate_get(msg['href'], headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - body = self.simulate_get(target, - query_string=params, - headers=self.headers) - cnt += 1 - - self.assertEqual(4, cnt) - 
self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - # Stats - body = self.simulate_get(self.queue_path + '/stats', - headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - message_stats = jsonutils.loads(body[0])['messages'] - - # NOTE(kgriffs): The other parts of the stats are tested - # in tests.storage.base and so are not repeated here. - expected_pattern = self.queue_path + '/messages/[^/]+$' - for message_stat_name in ('oldest', 'newest'): - self.assertThat(message_stats[message_stat_name]['href'], - matchers.MatchesRegex(expected_pattern)) - - # NOTE(kgriffs): Try to get messages for a missing queue - body = self.simulate_get(self.url_prefix + - '/queues/nonexistent/messages', - headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - def test_list_with_bad_marker(self): - path = self.queue_path + '/messages' - self._post_messages(path, repeat=5) - - query_string = 'limit=3&echo=true&marker=sfhlsfdjh2048' - body = self.simulate_get(path, - query_string=query_string, - headers=self.headers) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - self._empty_message_list(body) - - def test_no_uuid(self): - headers = { - 'Client-ID': "textid", - 'X-Project-ID': '7e7e7e' - } - path = self.queue_path + '/messages' - - self.simulate_post(path, body='[{"body": 0, "ttl": 100}]', - headers=headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - self.simulate_get(path, headers=headers) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_get_claimed_contains_claim_id_in_href(self): - path = self.queue_path - res = self._post_messages(path + '/messages', repeat=5) - for url in jsonutils.loads(res[0])['resources']: - message = self.simulate_get(url) - self.assertNotIn('claim_id', jsonutils.loads(message[0])['href']) - - self.simulate_post(path + '/claims', - body='{"ttl": 100, "grace": 100}', - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - for url in jsonutils.loads(res[0])['resources']: - message = self.simulate_get(url) - self.assertIn('claim_id', jsonutils.loads(message[0])['href']) - - # NOTE(cpp-cabrera): regression test against bug #1210633 - def test_when_claim_deleted_then_messages_unclaimed(self): - path = self.queue_path - self._post_messages(path + '/messages', repeat=5) - - # post claim - self.simulate_post(path + '/claims', - body='{"ttl": 100, "grace": 100}', - headers=self.headers) - self.assertEqual(falcon.HTTP_201, self.srmock.status) - location = self.srmock.headers_dict['location'] - - # release claim - self.simulate_delete(location, headers=self.headers) - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # get unclaimed messages - self.simulate_get(path + '/messages', - query_string='echo=true', - headers=self.headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - # NOTE(cpp-cabrera): regression test against bug #1203842 - def test_get_nonexistent_message_404s(self): - path = self.url_prefix + '/queues/notthere/messages/a' - self.simulate_get(path, headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_get_multiple_invalid_messages_404s(self): - path = self.url_prefix + '/queues/notthere/messages' - self.simulate_get(path, query_string='ids=a,b,c', - headers=self.headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_delete_multiple_invalid_messages_204s(self): - path = self.url_prefix + 
-        self.simulate_delete(path, query_string='ids=a,b,c',
-                             headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_delete_message_with_invalid_claim_doesnt_delete_message(self):
-        path = self.queue_path
-        resp = self._post_messages(path + '/messages', 1)
-        location = jsonutils.loads(resp[0])['resources'][0]
-
-        self.simulate_delete(location, query_string='claim_id=invalid',
-                             headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_get(location, headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_no_duplicated_messages_path_in_href(self):
-        """Test for bug 1240897."""
-
-        path = self.queue_path + '/messages'
-        self._post_messages(path, repeat=1)
-
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-
-        query_string = 'ids=%s' % msg_id
-        body = self.simulate_get(path,
-                                 query_string=query_string,
-                                 headers=self.headers)
-        messages = jsonutils.loads(body[0])
-
-        self.assertNotIn(self.queue_path + '/messages/messages',
-                         messages['messages'][0]['href'])
-
-    def _post_messages(self, target, repeat=1):
-        doc = {'messages': [{'body': 239, 'ttl': 300}] * repeat}
-
-        body = jsonutils.dumps(doc)
-        return self.simulate_post(target, body=body, headers=self.headers)
-
-    def _get_msg_id(self, headers):
-        return self._get_msg_ids(headers)[0]
-
-    def _get_msg_ids(self, headers):
-        return headers['location'].rsplit('=', 1)[-1].split(',')
-
-    @ddt.data(1, 2, 10)
-    def test_pop(self, message_count):
-
-        self._post_messages(self.messages_path, repeat=message_count)
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-        target = self.messages_path + '/' + msg_id
-
-        self.simulate_get(target, self.project_id)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        query_string = 'pop=' + str(message_count)
-        result = self.simulate_delete(self.messages_path, self.project_id,
-                                      query_string=query_string)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        result_doc = jsonutils.loads(result[0])
-
-        self.assertEqual(message_count, len(result_doc['messages']))
-
-        self.simulate_get(target, self.project_id)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    @ddt.data('', 'pop=1000000', 'pop=10&ids=1', 'pop=-1')
-    def test_pop_invalid(self, query_string):
-
-        self.simulate_delete(self.messages_path, self.project_id,
-                             query_string=query_string)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_pop_empty_queue(self):
-
-        query_string = 'pop=1'
-        result = self.simulate_delete(self.messages_path, self.project_id,
-                                      query_string=query_string)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual([], result_doc['messages'])
-
-    def test_pop_single_message(self):
-
-        self._post_messages(self.messages_path, repeat=5)
-        msg_id = self._get_msg_id(self.srmock.headers_dict)
-        target = self.messages_path + '/' + msg_id
-
-        self.simulate_get(target, self.project_id)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Pop Single message from the queue
-        query_string = 'pop=1'
-        result = self.simulate_delete(self.messages_path, self.project_id,
-                                      query_string=query_string)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Get messages from the queue & verify message count
-        query_string = 'echo=True'
-        result = self.simulate_get(self.messages_path, self.project_id,
-                                   query_string=query_string,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-        actual_msg_count = len(result_doc['messages'])
-        expected_msg_count = 4
-        self.assertEqual(expected_msg_count, actual_msg_count)
-
-
-class TestMessagesMongoDBPooled(TestMessagesMongoDB):
-    config_file = 'wsgi_mongodb_pooled.conf'
-
-    # TODO(cpp-cabrera): remove this skipTest once pooled queue
-    # listing is implemented
-    def test_list(self):
-        self.skipTest("Need to implement pooled queue listing.")
-
-
-class TestMessagesFaultyDriver(base.V2BaseFaulty):
-    config_file = 'wsgi_faulty.conf'
-
-    def test_simple(self):
-        project_id = 'xyz'
-        path = self.url_prefix + '/queues/fizbit/messages'
-        body = '{"messages": [{"body": 239, "ttl": 100}]}'
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': project_id
-        }
-
-        self.simulate_post(path,
-                           body=body,
-                           headers=headers)
-        self.assertEqual(falcon.HTTP_500, self.srmock.status)
-
-        self.simulate_get(path,
-                          headers=headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_get(path + '/nonexistent', headers=headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_delete(path + '/nada', headers=headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_ping.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_ping.py
deleted file mode 100644
index 1be9c477..00000000
--- a/zaqar/tests/unit/transport/wsgi/v2_0/test_ping.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import falcon
-
-from zaqar.tests.unit.transport.wsgi import base
-
-
-class TestPing(base.V2Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    def test_get(self):
-        # TODO(kgriffs): Make use of setUp for setting the URL prefix
-        # so we can just say something like:
-        #
-        #     response = self.simulate_get('/ping')
-        #
-        response = self.simulate_get('/v2/ping')
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-        self.assertEqual([], response)
-
-    def test_head(self):
-        response = self.simulate_head('/v2/ping')
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-        self.assertEqual([], response)
diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_pools.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_pools.py
deleted file mode 100644
index 454dd31b..00000000
--- a/zaqar/tests/unit/transport/wsgi/v2_0/test_pools.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import contextlib
-
-import ddt
-import falcon
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from zaqar import tests as testing
-from zaqar.tests.unit.transport.wsgi import base
-
-
-@contextlib.contextmanager
-def pool(test, name, weight, uri, group=None, options={}):
-    """A context manager for constructing a pool for use in testing.
-
-    Deletes the pool after exiting the context.
-
-    :param test: Must expose simulate_* methods
-    :param name: Name for this pool
-    :type name: six.text_type
-    :type weight: int
-    :type uri: six.text_type
-    :type options: dict
-    :returns: (name, weight, uri, options)
-    :rtype: see above
-    """
-    uri = "%s/%s" % (uri, uuidutils.generate_uuid())
-    doc = {'weight': weight, 'uri': uri,
-           'group': group, 'options': options}
-    path = test.url_prefix + '/pools/' + name
-
-    test.simulate_put(path, body=jsonutils.dumps(doc))
-
-    try:
-        yield name, weight, uri, group, options
-
-    finally:
-        test.simulate_delete(path)
-
-
-@contextlib.contextmanager
-def pools(test, count, uri, group):
-    """A context manager for constructing pools for use in testing.
-
-    Deletes the pools after exiting the context.
-
-    :param test: Must expose simulate_* methods
-    :param count: Number of pools to create
-    :type count: int
-    :returns: (paths, weights, uris, options)
-    :rtype: ([six.text_type], [int], [six.text_type], [dict])
-    """
-    mongo_url = uri
-    base = test.url_prefix + '/pools/'
-    args = [(base + str(i), i,
-             {str(i): i})
-            for i in range(count)]
-    for path, weight, option in args:
-        uri = "%s/%s" % (mongo_url, uuidutils.generate_uuid())
-        doc = {'weight': weight, 'uri': uri,
-               'group': group, 'options': option}
-        test.simulate_put(path, body=jsonutils.dumps(doc))
-
-    try:
-        yield args
-    finally:
-        for path, _, _ in args:
-            test.simulate_delete(path)
-
-
-@ddt.ddt
-class TestPoolsMongoDB(base.V2Base):
-
-    config_file = 'wsgi_mongodb_pooled.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestPoolsMongoDB, self).setUp()
-        self.doc = {'weight': 100,
-                    'group': 'mygroup',
-                    'uri': self.mongodb_url}
-        self.pool = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        self.simulate_put(self.pool, body=jsonutils.dumps(self.doc))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def tearDown(self):
-        super(TestPoolsMongoDB, self).tearDown()
-        self.simulate_delete(self.pool)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_put_pool_works(self):
-        name = uuidutils.generate_uuid()
-        weight, uri = self.doc['weight'], self.doc['uri']
-        with pool(self, name, weight, uri, group='my-group'):
-            self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_put_raises_if_missing_fields(self):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        self.simulate_put(path, body=jsonutils.dumps({'weight': 100}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_put(path,
-                          body=jsonutils.dumps(
-                              {'uri': self.mongodb_url}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 2**32+1, 'big')
-    def test_put_raises_if_invalid_weight(self, weight):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        doc = {'weight': weight, 'uri': 'a'}
-        self.simulate_put(path,
-                          body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 2**32+1, [], 'localhost:27017')
-    def test_put_raises_if_invalid_uri(self, uri):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        self.simulate_put(path,
-                          body=jsonutils.dumps({'weight': 1, 'uri': uri}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 'wee', [])
-    def test_put_raises_if_invalid_options(self, options):
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        doc = {'weight': 1, 'uri': 'a', 'options': options}
-        self.simulate_put(path, body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_put_same_database_uri(self):
-        # NOTE(cabrera): setUp creates default pool
-        expect = self.doc
-        path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
-        self.simulate_put(path, body=jsonutils.dumps(expect))
-        self.assertEqual(falcon.HTTP_409, self.srmock.status)
-
-    def test_put_existing_overwrites(self):
-        # NOTE(cabrera): setUp creates default pool
-        expect = self.doc
-        self.simulate_put(self.pool,
-                          body=jsonutils.dumps(expect))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        result = self.simulate_get(self.pool)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        doc = jsonutils.loads(result[0])
-        self.assertEqual(expect['weight'], doc['weight'])
-        self.assertEqual(expect['uri'], doc['uri'])
-
-    def test_put_capabilities_mismatch_pool(self):
-        mongodb_doc = self.doc
-        self.simulate_put(self.pool,
-                          body=jsonutils.dumps(mongodb_doc))
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        redis_doc = {'weight': 100,
-                     'group': 'mygroup',
-                     'uri': 'redis://127.0.0.1:6379'}
-
-        self.simulate_put(self.pool,
-                          body=jsonutils.dumps(redis_doc))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_delete_works(self):
-        self.simulate_delete(self.pool)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        self.simulate_get(self.pool)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_get_nonexisting_raises_404(self):
-        self.simulate_get(self.url_prefix + '/pools/nonexisting')
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def _pool_expect(self, pool, xhref, xweight, xuri):
-        self.assertIn('href', pool)
-        self.assertIn('name', pool)
-        self.assertEqual(xhref, pool['href'])
-        self.assertIn('weight', pool)
-        self.assertEqual(xweight, pool['weight'])
-        self.assertIn('uri', pool)
-
-        # NOTE(dynarro): we are using startwith because we are adding to
-        # pools UUIDs, to avoid dupplications
-        self.assertTrue(pool['uri'].startswith(xuri))
-
-    def test_get_works(self):
-        result = self.simulate_get(self.pool)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, self.doc['weight'],
-                          self.doc['uri'])
-
-    def test_detailed_get_works(self):
-        result = self.simulate_get(self.pool,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, self.doc['weight'],
-                          self.doc['uri'])
-        self.assertIn('options', pool)
-        self.assertEqual({}, pool['options'])
-
-    def test_patch_raises_if_missing_fields(self):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'location': 1}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def _patch_test(self, doc):
-        result = self.simulate_patch(self.pool,
-                                     body=jsonutils.dumps(doc))
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        updated_pool = jsonutils.loads(result[0])
-        self._pool_expect(updated_pool, self.pool, doc['weight'],
-                          doc['uri'])
-
-        result = self.simulate_get(self.pool,
-                                   query_string='detailed=True')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        pool = jsonutils.loads(result[0])
-        self._pool_expect(pool, self.pool, doc['weight'],
-                          doc['uri'])
-        self.assertEqual(doc['options'], pool['options'])
-
-    def test_patch_works(self):
-        doc = {'weight': 101,
-               'uri': self.mongodb_url,
-               'options': {'a': 1}}
-        self._patch_test(doc)
-
-    def test_patch_works_with_extra_fields(self):
-        doc = {'weight': 101,
-               'uri': self.mongodb_url,
-               'options': {'a': 1},
-               'location': 100,
-               'partition': 'taco'}
-        self._patch_test(doc)
-
-    @ddt.data(-1, 2**32+1, 'big')
-    def test_patch_raises_400_on_invalid_weight(self, weight):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'weight': weight}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 2**32+1, [], 'localhost:27017')
-    def test_patch_raises_400_on_invalid_uri(self, uri):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'uri': uri}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data(-1, 'wee', [])
-    def test_patch_raises_400_on_invalid_options(self, options):
-        self.simulate_patch(self.pool,
-                            body=jsonutils.dumps({'options': options}))
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_patch_raises_404_if_pool_not_found(self):
-        self.simulate_patch(self.url_prefix + '/pools/notexists',
-                            body=jsonutils.dumps({'weight': 1}))
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_empty_listing(self):
-        self.simulate_delete(self.pool)
-        result = self.simulate_get(self.url_prefix + '/pools')
-        results = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self.assertEqual(0, len(results['pools']))
-        self.assertIn('links', results)
-
-    def _listing_test(self, count=10, limit=10,
-                      marker=None, detailed=False):
-        # NOTE(cpp-cabrera): delete initial pool - it will interfere
-        # with listing tests
-        self.simulate_delete(self.pool)
-        query = 'limit={0}&detailed={1}'.format(limit, detailed)
-        if marker:
-            query += '&marker={0}'.format(marker)
-
-        with pools(self, count, self.doc['uri'], 'my-group') as expected:
-            result = self.simulate_get(self.url_prefix + '/pools',
-                                       query_string=query)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            results = jsonutils.loads(result[0])
-            self.assertIsInstance(results, dict)
-            self.assertIn('pools', results)
-            self.assertIn('links', results)
-            pool_list = results['pools']
-
-            link = results['links'][0]
-            self.assertEqual('next', link['rel'])
-            href = falcon.uri.parse_query_string(link['href'].split('?')[1])
-            self.assertIn('marker', href)
-            self.assertEqual(str(limit), href['limit'])
-            self.assertEqual(str(detailed).lower(), href['detailed'])
-
-            next_query_string = ('marker={marker}&limit={limit}'
-                                 '&detailed={detailed}').format(**href)
-            next_result = self.simulate_get(link['href'].split('?')[0],
-                                            query_string=next_query_string)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-            next_pool = jsonutils.loads(next_result[0])
-            next_pool_list = next_pool['pools']
-
-            self.assertIn('links', next_pool)
-            if limit < count:
-                self.assertEqual(min(limit, count-limit),
-                                 len(next_pool_list))
-            else:
-                # NOTE(jeffrey4l): when limit >= count, there will be no
-                # pools in the 2nd page.
-                self.assertEqual(0, len(next_pool_list))
-
-            self.assertEqual(min(limit, count), len(pool_list))
-            for s in pool_list + next_pool_list:
-                # NOTE(flwang): It can't assumed that both sqlalchemy and
-                # mongodb can return query result with the same order. Just
-                # like the order they're inserted. Actually, sqlalchemy can't
-                # guarantee that. So we're leveraging the relationship between
-                # pool weight and the index of pools fixture to get the
-                # right pool to verify.
-                expect = expected[s['weight']]
-                path, weight, group = expect[:3]
-                self._pool_expect(s, path, weight, self.doc['uri'])
-                if detailed:
-                    self.assertIn('options', s)
-                    self.assertEqual(s['options'], expect[-1])
-                else:
-                    self.assertNotIn('options', s)
-
-    def test_listing_works(self):
-        self._listing_test()
-
-    def test_detailed_listing_works(self):
-        self._listing_test(detailed=True)
-
-    @ddt.data(1, 5, 10, 15)
-    def test_listing_works_with_limit(self, limit):
-        self._listing_test(count=15, limit=limit)
-
-    def test_listing_marker_is_respected(self):
-        self.simulate_delete(self.pool)
-
-        with pools(self, 10, self.doc['uri'], 'my-group') as expected:
-            result = self.simulate_get(self.url_prefix + '/pools',
                                       query_string='marker=3')
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-            pool_list = jsonutils.loads(result[0])['pools']
-            self.assertEqual(6, len(pool_list))
-            path, weight = expected[4][:2]
-            self._pool_expect(pool_list[0], path, weight, self.doc['uri'])
-
-    def test_listing_error_with_invalid_limit(self):
-        self.simulate_delete(self.pool)
-        query = 'limit={0}&detailed={1}'.format(0, True)
-
-        with pools(self, 10, self.doc['uri'], 'my-group'):
-            self.simulate_get(self.url_prefix + '/pools', query_string=query)
-            self.assertEqual(falcon.HTTP_400, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_purge.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_purge.py
deleted file mode 100644
index 4ca8e154..00000000
--- a/zaqar/tests/unit/transport/wsgi/v2_0/test_purge.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2016 Catalyst IT Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import falcon
-
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from zaqar.tests.unit.transport.wsgi import base
-
-
-class TestPurge(base.V2Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    def setUp(self):
-        super(TestPurge, self).setUp()
-
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid()
-        }
-        self.queue_path = self.url_prefix + '/queues/myqueue'
-        self.messages_path = self.queue_path + '/messages'
-        self.subscription_path = (self.queue_path + '/subscriptions')
-
-        self.messages = {'messages': [{'body': 'A', 'ttl': 300},
-                                      {'body': 'B', 'ttl': 400},
-                                      {'body': 'C', 'ttl': 500}]}
-        self.subscriptions = {"subscriber": "http://ping.me", "ttl": 3600,
-                              "options": {"key": "value"}}
-
-    def tearDown(self):
-        self.simulate_delete(self.queue_path, headers=self.headers)
-        super(TestPurge, self).tearDown()
-
-    def _get_msg_id(self, headers):
-        return self._get_msg_ids(headers)[0]
-
-    def _get_msg_ids(self, headers):
-        return headers['location'].rsplit('=', 1)[-1].split(',')
-
-    def test_purge_particular_resource(self):
-        # Post messages
-        messages_body = jsonutils.dumps(self.messages)
-        self.simulate_post(self.messages_path, body=messages_body,
-                           headers=self.headers)
-
-        msg_ids = self._get_msg_ids(self.srmock.headers_dict)
-        for msg_id in msg_ids:
-            target = self.messages_path + '/' + msg_id
-            self.simulate_get(target, headers=self.headers)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Post subscriptions
-        sub_resp = self.simulate_post(self.subscription_path,
-                                      body=jsonutils.dumps(self.subscriptions),
-                                      headers=self.headers)
-
-        # Purge queue
-        purge_body = jsonutils.dumps({'resource_types': ['messages']})
-        self.simulate_post(self.queue_path+"/purge", body=purge_body)
-
-        for msg_id in msg_ids:
-            target = self.messages_path + '/' + msg_id
-            self.simulate_get(target, headers=self.headers)
-            self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-        # Check subscriptions are still there
-        resp_list = self.simulate_get(self.subscription_path,
-                                      headers=self.headers)
-        resp_list_doc = jsonutils.loads(resp_list[0])
-        sid = resp_list_doc['subscriptions'][0]['id']
-        sub_resp_doc = jsonutils.loads(sub_resp[0])
-        self.assertEqual(sub_resp_doc['subscription_id'], sid)
-
-    def test_purge_by_default(self):
-        # Post messages
-        messages_body = jsonutils.dumps(self.messages)
-        self.simulate_post(self.messages_path, body=messages_body,
-                           headers=self.headers)
-
-        msg_ids = self._get_msg_ids(self.srmock.headers_dict)
-        for msg_id in msg_ids:
-            target = self.messages_path + '/' + msg_id
-            self.simulate_get(target, headers=self.headers)
-            self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Post subscriptions
-        sub_resp = self.simulate_post(self.subscription_path,
-                                      body=jsonutils.dumps(self.subscriptions),
-                                      headers=self.headers)
-
-        # Purge queue
-        purge_body = jsonutils.dumps({'resource_types': ['messages',
-                                                         'subscriptions']})
-        self.simulate_post(self.queue_path+"/purge", body=purge_body)
-
-        for msg_id in msg_ids:
-            target = self.messages_path + '/' + msg_id
-            self.simulate_get(target, headers=self.headers)
-            self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-        # Check subscriptions are still there
-        sub_id = jsonutils.loads(sub_resp[0])['subscription_id']
-        self.simulate_get(self.subscription_path + "/" + sub_id,
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py
deleted file mode 100644
index 08739c11..00000000
--- a/zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py
+++ /dev/null
@@ -1,505 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import ddt
-import falcon
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-import six
-
-from zaqar.storage import errors as storage_errors
-from zaqar import tests as testing
-from zaqar.tests.unit.transport.wsgi import base
-
-
-@ddt.ddt
-class TestQueueLifecycleMongoDB(base.V2Base):
-
-    config_file = 'wsgi_mongodb.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestQueueLifecycleMongoDB, self).setUp()
-
-        self.queue_path = self.url_prefix + '/queues'
-        self.gumshoe_queue_path = self.queue_path + '/gumshoe'
-        self.fizbat_queue_path = self.queue_path + '/fizbat'
-
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': '3387309841abc_'
-        }
-
-    def tearDown(self):
-        control = self.boot.control
-        storage = self.boot.storage._storage
-        connection = storage.connection
-
-        connection.drop_database(control.queues_database)
-
-        for db in storage.message_databases:
-            connection.drop_database(db)
-
-        super(TestQueueLifecycleMongoDB, self).tearDown()
-
-    def test_without_project_id(self):
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-        }
-
-        self.simulate_put(self.gumshoe_queue_path, headers=headers,
-                          need_project_id=False)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_delete(self.gumshoe_queue_path, headers=headers,
-                             need_project_id=False)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_empty_project_id(self):
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': ''
-        }
-
-        self.simulate_put(self.gumshoe_queue_path, headers=headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_delete(self.gumshoe_queue_path, headers=headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    @ddt.data('480924', 'foo')
-    def test_basics_thoroughly(self, project_id):
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': project_id
-        }
-        gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats'
-
-        # Stats are empty - queue not created yet
-        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Create
-        doc = '{"messages": {"ttl": 600}}'
-        self.simulate_put(self.gumshoe_queue_path,
-                          headers=headers, body=doc)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        location = self.srmock.headers_dict['Location']
-        self.assertEqual(location, self.gumshoe_queue_path)
-
-        # Fetch metadata
-        result = self.simulate_get(self.gumshoe_queue_path,
-                                   headers=headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        ref_doc = jsonutils.loads(doc)
-        ref_doc['_default_message_ttl'] = 3600
-        ref_doc['_max_messages_post_size'] = 262144
-        self.assertEqual(ref_doc, result_doc)
-
-        # Stats empty queue
-        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Delete
-        self.simulate_delete(self.gumshoe_queue_path, headers=headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        # Get non-existent stats
-        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_name_restrictions(self):
-        self.simulate_put(self.queue_path + '/Nice-Boat_2',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        self.simulate_put(self.queue_path + '/Nice-Bo@t',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_put(self.queue_path + '/_' + 'niceboat' * 8,
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_put(self.queue_path + '/Service.test_queue',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_project_id_restriction(self):
-        muvluv_queue_path = self.queue_path + '/Muv-Luv'
-
-        self.simulate_put(muvluv_queue_path,
-                          headers={'Client-ID': uuidutils.generate_uuid(),
-                                   'X-Project-ID': 'JAM Project' * 24})
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # no charset restrictions
-        self.simulate_put(muvluv_queue_path,
-                          headers={'Client-ID': uuidutils.generate_uuid(),
-                                   'X-Project-ID': 'JAM Project'})
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    def test_non_ascii_name(self):
-        test_params = ((u'/queues/non-ascii-n\u0153me', 'utf-8'),
-                       (u'/queues/non-ascii-n\xc4me', 'iso8859-1'))
-
-        for uri, enc in test_params:
-            uri = self.url_prefix + uri
-
-            if six.PY2:
-                uri = uri.encode(enc)
-
-            self.simulate_put(uri, headers=self.headers)
-            self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-            self.simulate_delete(uri, headers=self.headers)
-            self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_no_metadata(self):
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        self.simulate_put(self.fizbat_queue_path, body='',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        result = self.simulate_get(self.fizbat_queue_path,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(256 * 1024,
-                         result_doc.get('_max_messages_post_size'))
-        self.assertEqual(3600,
-                         result_doc.get('_default_message_ttl'))
-
-    @ddt.data('{', '[]', '.', ' ')
-    def test_bad_metadata(self, document):
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers,
-                          body=document)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_too_much_metadata(self):
-        self.simulate_put(self.fizbat_queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
-
-        max_size = self.transport_cfg.max_queue_metadata
-        padding_len = max_size - (len(doc) - 10) + 1
-
-        doc = doc.format(pad='x' * padding_len)
-
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers,
-                          body=doc)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_way_too_much_metadata(self):
-        self.simulate_put(self.fizbat_queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
-
-        max_size = self.transport_cfg.max_queue_metadata
-        padding_len = max_size * 100
-
-        doc = doc.format(pad='x' * padding_len)
-
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers, body=doc)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_custom_metadata(self):
-        # Set
-        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
-
-        max_size = self.transport_cfg.max_queue_metadata
-        padding_len = max_size - (len(doc) - 2)
-
-        doc = doc.format(pad='x' * padding_len)
-        self.simulate_put(self.fizbat_queue_path,
-                          headers=self.headers,
-                          body=doc)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Get
-        result = self.simulate_get(self.fizbat_queue_path,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-        ref_doc = jsonutils.loads(doc)
-        ref_doc['_default_message_ttl'] = 3600
-        ref_doc['_max_messages_post_size'] = 262144
-        self.assertEqual(ref_doc, result_doc)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_update_metadata(self):
-        xyz_queue_path = self.url_prefix + '/queues/xyz'
-        xyz_queue_path_metadata = xyz_queue_path
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': uuidutils.generate_uuid()
-        }
-        # Create
-        self.simulate_put(xyz_queue_path, headers=headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        headers.update({'Content-Type':
-                        "application/openstack-messaging-v2.0-json-patch"})
-        # add metadata
-        doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},'
-                '{"op":"add", "path": "/metadata/key2", "value": 1}]')
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc1)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # remove reserved metadata, zaqar will do nothing and return 200,
-        # because
-        doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc3)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # replace metadata
-        doc2 = '[{"op":"replace", "path": "/metadata/key1", "value": 2}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc2)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # replace reserved metadata, zaqar will store the reserved metadata
-        doc2 = ('[{"op":"replace", "path": "/metadata/_default_message_ttl",'
-                '"value": 300}]')
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc2)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Get
-        result = self.simulate_get(xyz_queue_path_metadata,
-                                   headers=headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual({'key1': 2, 'key2': 1,
-                          '_default_message_ttl': 300,
-                          '_max_messages_post_size': 262144}, result_doc)
-
-        # remove metadata
-        doc3 = '[{"op":"remove", "path": "/metadata/key1"}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc3)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # remove reserved metadata
-        doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc3)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Get
-        result = self.simulate_get(xyz_queue_path_metadata,
-                                   headers=headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual({'key2': 1, '_default_message_ttl': 3600,
-                          '_max_messages_post_size': 262144}, result_doc)
-
-        # replace non-existent metadata
-        doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc4)
-        self.assertEqual(falcon.HTTP_409, self.srmock.status)
-
-        # remove non-existent metadata
-        doc5 = '[{"op":"remove", "path": "/metadata/key3"}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc5)
-        self.assertEqual(falcon.HTTP_409, self.srmock.status)
-
-        self.simulate_delete(xyz_queue_path, headers=headers)
-
-        # add metadata to non-existent queue
-        doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},'
-                '{"op":"add", "path": "/metadata/key2", "value": 1}]')
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc1)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-        # replace metadata in non-existent queue
-        doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc4)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-        # remove metadata from non-existent queue
-        doc5 = '[{"op":"remove", "path": "/metadata/key3"}]'
-        self.simulate_patch(xyz_queue_path_metadata,
-                            headers=headers,
-                            body=doc5)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_list(self):
-        arbitrary_number = 644079696574693
-        project_id = str(arbitrary_number)
-        client_id = uuidutils.generate_uuid()
-        header = {
-            'X-Project-ID': project_id,
-            'Client-ID': client_id
-        }
-
-        # NOTE(kgriffs): It's important that this one sort after the one
-        # above. This is in order to prove that bug/1236605 is fixed, and
-        # stays fixed!
-        alt_project_id = str(arbitrary_number + 1)
-
-        # List empty
-        result = self.simulate_get(self.queue_path, headers=header)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        results = jsonutils.loads(result[0])
-        self.assertEqual([], results['queues'])
-        self.assertIn('links', results)
-        self.assertEqual(0, len(results['links']))
-
-        # Payload exceeded
-        self.simulate_get(self.queue_path, headers=header,
-                          query_string='limit=21')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Create some
-        def create_queue(name, project_id, body):
-            altheader = {'Client-ID': client_id}
-            if project_id is not None:
-                altheader['X-Project-ID'] = project_id
-            uri = self.queue_path + '/' + name
-            self.simulate_put(uri, headers=altheader, body=body)
-
-        create_queue('q1', project_id, '{"node": 31}')
-        create_queue('q2', project_id, '{"node": 32}')
-        create_queue('q3', project_id, '{"node": 33}')
-
-        create_queue('q3', alt_project_id, '{"alt": 1}')
-
-        # List (limit)
-        result = self.simulate_get(self.queue_path, headers=header,
-                                   query_string='limit=2')
-
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(2, len(result_doc['queues']))
-
-        # List (no metadata, get all)
-        result = self.simulate_get(self.queue_path,
-                                   headers=header, query_string='limit=5')
-
-        result_doc = jsonutils.loads(result[0])
-        [target, params] = result_doc['links'][0]['href'].split('?')
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # Ensure we didn't pick up the queue from the alt project.
-        queues = result_doc['queues']
-        self.assertEqual(3, len(queues))
-
-        # List with metadata
-        result = self.simulate_get(self.queue_path, headers=header,
-                                   query_string='detailed=true')
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        result_doc = jsonutils.loads(result[0])
-        [target, params] = result_doc['links'][0]['href'].split('?')
-
-        queue = result_doc['queues'][0]
-        result = self.simulate_get(queue['href'], headers=header)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(queue['metadata'], result_doc)
-        self.assertEqual({'node': 31, '_default_message_ttl': 3600,
-                          '_max_messages_post_size': 262144}, result_doc)
-
-        # List tail
-        self.simulate_get(target, headers=header, query_string=params)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        # List manually-constructed tail
-        self.simulate_get(target, headers=header, query_string='marker=zzz')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_list_returns_503_on_nopoolfound_exception(self):
-        arbitrary_number = 644079696574693
-        project_id = str(arbitrary_number)
-        client_id = uuidutils.generate_uuid()
-        header = {
-            'X-Project-ID': project_id,
-            'Client-ID': client_id
-        }
-
-        queue_controller = self.boot.storage.queue_controller
-
-        with mock.patch.object(queue_controller, 'list') as mock_queue_list:
-
-            def queue_generator():
-                raise storage_errors.NoPoolFound()
-
-            # This generator tries to be like queue controller list generator
-            # in some ways.
-            def fake_generator():
-                yield queue_generator()
-                yield {}
-            mock_queue_list.return_value = fake_generator()
-            self.simulate_get(self.queue_path, headers=header)
-            self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-
-class TestQueueLifecycleFaultyDriver(base.V2BaseFaulty):
-
-    config_file = 'wsgi_faulty.conf'
-
-    def test_simple(self):
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': '338730984abc_1'
-        }
-
-        gumshoe_queue_path = self.url_prefix + '/queues/gumshoe'
-        doc = '{"messages": {"ttl": 600}}'
-        self.simulate_put(gumshoe_queue_path,
-                          headers=self.headers,
-                          body=doc)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        location = ('Location', gumshoe_queue_path)
-        self.assertNotIn(location, self.srmock.headers)
-
-        result = self.simulate_get(gumshoe_queue_path,
-                                   headers=self.headers)
-        result_doc = jsonutils.loads(result[0])
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-        self.assertNotEqual(result_doc, jsonutils.loads(doc))
-
-        self.simulate_get(gumshoe_queue_path + '/stats',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_get(self.url_prefix + '/queues',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-        self.simulate_delete(gumshoe_queue_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_503, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_subscriptions.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_subscriptions.py
deleted file mode 100644
index 00b3f313..00000000
--- a/zaqar/tests/unit/transport/wsgi/v2_0/test_subscriptions.py
+++ /dev/null
@@ -1,444 +0,0 @@
-# Copyright (c) 2015 Catalyst IT Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import ddt
-import falcon
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from zaqar.common import auth
-from zaqar.notification import notifier
-from zaqar.storage import errors as storage_errors
-from zaqar import tests as testing
-from zaqar.tests.unit.transport.wsgi import base
-
-
-@ddt.ddt
-class TestSubscriptionsMongoDB(base.V2Base):
-
-    config_file = 'wsgi_mongodb_pooled.conf'
-
-    @testing.requires_mongodb
-    def setUp(self):
-        super(TestSubscriptionsMongoDB, self).setUp()
-
-        if self.conf.pooling:
-            for i in range(1):
-                uri = self.conf['drivers:management_store:mongodb'].uri
-                doc = {'weight': 100, 'uri': uri}
-                self.simulate_put(self.url_prefix + '/pools/' + str(i),
-                                  body=jsonutils.dumps(doc))
-                self.assertEqual(falcon.HTTP_201, self.srmock.status)
-                self.addCleanup(self.simulate_delete,
-                                self.url_prefix + '/pools/' + str(i),
-                                headers=self.headers)
-
-        self.project_id = '7e55e1a7exyz'
-        self.headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'X-Project-ID': self.project_id
-        }
-        self.queue = 'fake-topic'
-        self.queue_path = self.url_prefix + '/queues/' + self.queue
-        doc = '{"_ttl": 60}'
-        self.simulate_put(self.queue_path, body=doc, headers=self.headers)
-
-        self.subscription_path = (self.url_prefix + '/queues/' + self.queue +
-                                  '/subscriptions')
-        self.subscription = 'fake-id'
-        self.confirm_path = (self.url_prefix + '/queues/' + self.queue +
-                             '/subscriptions/' + self.subscription +
-                             '/confirm')
-        self.conf.signed_url.secret_key = 'test_key'
-
-    def tearDown(self):
-        resp = self.simulate_get(self.subscription_path,
-                                 headers=self.headers)
-        resp_doc = jsonutils.loads(resp[0])
-        for s in resp_doc['subscriptions']:
-            self.simulate_delete(self.subscription_path + '/' + s['id'],
-                                 headers=self.headers)
-
-        self.simulate_delete(self.queue_path)
-        super(TestSubscriptionsMongoDB, self).tearDown()
-
-    def _create_subscription(self,
-                             subscriber='http://triger.me',
-                             ttl=600,
-                             options='{"a":1}'):
-        doc = ('{"subscriber": "%s", "ttl": %s, "options": %s}' % (subscriber,
-                                                                   ttl,
-                                                                   options))
-        return self.simulate_post(self.subscription_path, body=doc,
-                                  headers=self.headers)
-
-    def test_create_works(self):
-        resp = self._create_subscription()
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-
-        resp_list = self.simulate_get(self.subscription_path,
-                                      headers=self.headers)
-        resp_list_doc = jsonutils.loads(resp_list[0])
-        sid = resp_list_doc['subscriptions'][0]['id']
-
-        self.assertEqual(resp_doc['subscription_id'], sid)
-
-    def test_create_duplicate_409(self):
-        self._create_subscription(subscriber='http://CCC.com')
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # the subscription is not confirmed, So the second request will
-        # retry confirm and return 201 again.
-        self._create_subscription(subscriber='http://CCC.com')
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-    @mock.patch.object(notifier.NotifierDriver, 'send_confirm_notification')
-    def test_create_and_send_notification(self, mock_send_confirm):
-        self._create_subscription(subscriber='http://CCC.com')
-        self.assertEqual(1, mock_send_confirm.call_count)
-
-    @mock.patch.object(notifier.NotifierDriver, 'send_confirm_notification')
-    def test_recreate(self, mock_send_confirm):
-        resp = self._create_subscription(subscriber='http://CCC.com')
-        resp_doc = jsonutils.loads(resp[0])
-        s_id1 = resp_doc['subscription_id']
-        self.assertEqual(1, mock_send_confirm.call_count)
-
-        resp = self._create_subscription(subscriber='http://CCC.com')
-        resp_doc = jsonutils.loads(resp[0])
-        s_id2 = resp_doc['subscription_id']
-        self.assertEqual(2, mock_send_confirm.call_count)
-
-        self.assertEqual(s_id1, s_id2)
-
-    @mock.patch.object(notifier.NotifierDriver, 'send_confirm_notification')
-    def test_recreate_after_confirmed(self, mock_send_confirm):
-        resp = self._create_subscription(subscriber='http://CCC.com')
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        doc = '{"confirmed": true}'
-        resp_doc = jsonutils.loads(resp[0])
-        confirm_path = (self.url_prefix + '/queues/' + self.queue +
-                        '/subscriptions/' + resp_doc['subscription_id'] +
-                        '/confirm')
-        self.simulate_put(confirm_path, body=doc, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-        self.assertEqual(1, mock_send_confirm.call_count)
-
-        self._create_subscription(subscriber='http://CCC.com')
-        self.assertEqual(falcon.HTTP_409, self.srmock.status)
-
-    def test_create_invalid_body_400(self):
-        resp = self._create_subscription(options='xxx')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIn('body could not be parsed', resp_doc['description'])
-
-    def test_create_no_body(self):
-        resp = self.simulate_post(self.subscription_path, headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        self.assertIn('Missing parameter',
-                      jsonutils.loads(resp[0])['description'])
-
-    def test_create_invalid_subscriber_400(self):
-        resp = self._create_subscription(subscriber='fake')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIn('must be supported in the list', resp_doc['description'])
-
-    def test_create_unsupported_subscriber_400(self):
-        resp = self._create_subscription(subscriber='email://fake')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIn('must be supported in the list',
-                      resp_doc['description'])
-
-    def test_create_invalid_options_400(self):
-        resp = self._create_subscription(options='1')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIn('must be a dict', resp_doc['description'])
-
-    def test_create_invalid_ttl(self):
-        resp = self._create_subscription(ttl='"invalid"')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIn('must be an integer', resp_doc['description'])
-
-    def _list_subscription(self, count=10, limit=10, marker=None):
-        for i in range(count):
-            self._create_subscription(subscriber='http://' + str(i))
-
-        query = 'limit={0}'.format(limit)
-        if marker:
-            query += '&marker={1}'.format(marker)
-
-        resp = self.simulate_get(self.subscription_path,
-                                 query_string=query,
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIsInstance(resp_doc, dict)
-        self.assertIn('subscriptions', resp_doc)
-        self.assertIn('links', resp_doc)
-        subscriptions_list = resp_doc['subscriptions']
-
-        link = resp_doc['links'][0]
-        self.assertEqual('next', link['rel'])
-        href = falcon.uri.parse_query_string(link['href'].split('?')[1])
-        self.assertIn('marker', href)
-        self.assertEqual(str(limit), href['limit'])
-
-        next_query_string = ('marker={marker}&limit={limit}').format(**href)
-        next_result = self.simulate_get(link['href'].split('?')[0],
-                                        query_string=next_query_string)
-        next_subscriptions = jsonutils.loads(next_result[0])
-        next_subscriptions_list = next_subscriptions['subscriptions']
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        self.assertIn('links', next_subscriptions)
-        if limit < count:
-            self.assertEqual(min(limit, count-limit),
-                             len(next_subscriptions_list))
-        else:
-            self.assertEqual(0, len(next_subscriptions_list))
-
-        self.assertEqual(min(limit, count), len(subscriptions_list))
-
-    def test_list_works(self):
-        self._list_subscription()
-
-    def test_list_returns_503_on_nopoolfound_exception(self):
-        arbitrary_number = 644079696574693
-        project_id = str(arbitrary_number)
-        client_id = uuidutils.generate_uuid()
-        header = {
-            'X-Project-ID': project_id,
-            'Client-ID': client_id
-        }
-
-        subscription_controller = self.boot.storage.subscription_controller
-
-        with mock.patch.object(subscription_controller, 'list') as \
-                mock_subscription_list:
-
-            def subscription_generator():
-                raise storage_errors.NoPoolFound()
-
-            # This generator tries to be like subscription controller list
-            # generator in some ways.
-            def fake_generator():
-                yield subscription_generator()
-                yield {}
-            mock_subscription_list.return_value = fake_generator()
-            self.simulate_get(self.subscription_path, headers=header)
-            self.assertEqual(falcon.HTTP_503, self.srmock.status)
-
-    def test_list_empty(self):
-        resp = self.simulate_get(self.subscription_path,
-                                 headers=self.headers)
-
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIsInstance(resp_doc, dict)
-        self.assertIn('subscriptions', resp_doc)
-        self.assertIn('links', resp_doc)
-        self.assertEqual([], resp_doc['subscriptions'])
-        self.assertEqual([], resp_doc['links'])
-
-    @ddt.data(1, 5, 10, 15)
-    def test_listing_works_with_limit(self, limit):
-        self._list_subscription(count=15, limit=limit)
-
-    def test_listing_marker_is_respected(self):
-        for i in range(15):
-            self._create_subscription(subscriber='http://' + str(i))
-
-        resp = self.simulate_get(self.subscription_path,
-                                 query_string='limit=20',
-                                 headers=self.headers)
-        subscriptions_list = jsonutils.loads(resp[0])['subscriptions']
-        id_list = sorted([s['id'] for s in subscriptions_list])
-
-        resp = self.simulate_get(self.subscription_path,
-                                 query_string='marker={0}'.format(id_list[9]),
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        next_subscriptions_list = jsonutils.loads(resp[0])['subscriptions']
-        self.assertEqual(5, len(next_subscriptions_list))
-        # The subscriptions's age should be 0 at this moment. But in some
-        # unexpected case, such as slow test, the age maybe larger than 0.
-        self.assertGreaterEqual(next_subscriptions_list[0].pop('age'),
-                                subscriptions_list[10].pop('age'))
-        self.assertEqual(subscriptions_list[10], next_subscriptions_list[0])
-
-    def test_get_works(self):
-        self._create_subscription()
-        resp = self.simulate_get(self.subscription_path,
-                                 headers=self.headers)
-        resp_doc = jsonutils.loads(resp[0])
-        sid = resp_doc['subscriptions'][0]['id']
-        subscriber = resp_doc['subscriptions'][0]['subscriber']
-
-        resp = self.simulate_get(self.subscription_path + '/' + sid,
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertEqual(sid, resp_doc['id'])
-        self.assertEqual(subscriber, resp_doc['subscriber'])
-
-    def test_get_nonexisting_raise_404(self):
-        self.simulate_get(self.subscription_path + '/fake',
-                          headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_patch_works(self):
-        self._create_subscription()
-        resp = self.simulate_get(self.subscription_path,
-                                 headers=self.headers)
-        resp_doc = jsonutils.loads(resp[0])
-        sid = resp_doc['subscriptions'][0]['id']
-
-        resp = self.simulate_patch(self.subscription_path + '/' + sid,
-                                   body='{"ttl": 300}',
-                                   headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        resp = self.simulate_get(self.subscription_path + '/' + sid,
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertEqual(300, resp_doc['ttl'])
-
-    def test_patch_nonexisting_raise_404(self):
-        self.simulate_patch(self.subscription_path + '/x',
-                            body='{"ttl": 300}',
-                            headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    def test_patch_to_duplicate_raise_409(self):
-        self._create_subscription()
-        toupdate = self._create_subscription(subscriber='http://update.me',
-                                             ttl=600,
-                                             options='{"a":1}')
-        toupdate_sid = jsonutils.loads(toupdate[0])['subscription_id']
-        doc = {'subscriber': 'http://triger.me'}
-        self.simulate_patch(self.subscription_path + '/' + toupdate_sid,
-                            body=jsonutils.dumps(doc),
-                            headers=self.headers)
-        self.assertEqual(falcon.HTTP_409, self.srmock.status)
-
-    def test_patch_no_body(self):
-        self._create_subscription()
-        resp = self.simulate_get(self.subscription_path,
-                                 headers=self.headers)
-        resp_doc = jsonutils.loads(resp[0])
-        sid = resp_doc['subscriptions'][0]['id']
-
-        resp = self.simulate_patch(self.subscription_path + '/' + sid,
-                                   headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertNotIn('{subscription_id}', resp_doc['description'])
-
-    def test_patch_invalid_ttl(self):
-        self.simulate_patch(self.subscription_path + '/x',
-                            body='{"ttl": "invalid"}',
-                            headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_patch_invalid_body(self):
-        resp = self.simulate_patch(self.subscription_path + '/x',
-                                   body='[1]',
-                                   headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertEqual('Subscriptions must be a dict.',
-                         resp_doc['description'])
-
-    def test_delete_works(self):
-        self._create_subscription()
-        resp = self.simulate_get(self.subscription_path,
-                                 headers=self.headers)
-        resp_doc = jsonutils.loads(resp[0])
-        sid = resp_doc['subscriptions'][0]['id']
-
-        resp = self.simulate_get(self.subscription_path + '/' + sid,
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-        self.simulate_delete(self.subscription_path + '/' + sid,
-                             headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-        resp = self.simulate_get(self.subscription_path + '/' + sid,
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
-
-    @mock.patch.object(auth, 'create_trust_id')
-    def test_create_with_trust(self, create_trust):
-        create_trust.return_value = 'trust_id'
-        self.headers['X-USER-ID'] = 'user-id'
-        self.headers['X-ROLES'] = 'my-roles'
-        self._create_subscription('trust+http://example.com')
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        self.assertEqual('user-id', create_trust.call_args[0][1])
-        self.assertEqual(self.project_id, create_trust.call_args[0][2])
-        self.assertEqual(['my-roles'], create_trust.call_args[0][3])
-
-        resp_list = self.simulate_get(self.subscription_path,
-                                      headers=self.headers)
-        resp_list_doc = jsonutils.loads(resp_list[0])
-        options = resp_list_doc['subscriptions'][0]['options']
-
-        self.assertEqual({'a': 1, 'trust_id': 'trust_id'}, options)
-
-    def test_confirm(self):
-        doc = '{"confirmed": true}'
-        resp = self._create_subscription()
-        resp_doc = jsonutils.loads(resp[0])
-        confirm_path = (self.url_prefix + '/queues/' + self.queue +
-                        '/subscriptions/' + resp_doc['subscription_id'] +
-                        '/confirm')
-        self.simulate_put(confirm_path, body=doc, headers=self.headers)
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
-
-    def test_confirm_with_invalid_body(self):
-        doc = '{confirmed:123}'
-        resp = self.simulate_put(self.confirm_path, body=doc,
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertIn('body could not be parsed', resp_doc['description'])
-
-    def test_confirm_without_boolean_body(self):
-        doc = '{"confirmed":123}'
-        resp = self.simulate_put(self.confirm_path, body=doc,
-                                 headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-        resp_doc = jsonutils.loads(resp[0])
-        self.assertEqual("The 'confirmed' should be boolean.",
-                         resp_doc['description'])
-
-    def test_confirm_with_non_subscription(self):
-        doc = '{"confirmed": true}'
-        self.simulate_put(self.confirm_path, body=doc, headers=self.headers)
-        self.assertEqual(falcon.HTTP_404, self.srmock.status)
diff --git a/zaqar/tests/unit/transport/wsgi/v2_0/test_urls.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_urls.py
deleted file mode 100644
index 33b6d546..00000000
--- a/zaqar/tests/unit/transport/wsgi/v2_0/test_urls.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import datetime - -import falcon -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -from zaqar.common import urls -from zaqar.tests.unit.transport.wsgi import base - - -class TestURL(base.V2Base): - - config_file = 'wsgi_mongodb.conf' - - def setUp(self): - super(TestURL, self).setUp() - - self.signed_url_prefix = self.url_prefix + '/queues/shared_queue/share' - - def test_url_generation(self): - timeutils.set_time_override() - self.addCleanup(timeutils.clear_time_override) - - data = {'methods': ['GET', 'POST']} - response = self.simulate_post(self.signed_url_prefix, - body=jsonutils.dumps(data)) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - content = jsonutils.loads(response[0]) - - expires = timeutils.utcnow(True) + datetime.timedelta(days=1) - expires_str = expires.strftime(urls._DATE_FORMAT) - - for field in ['signature', 'project', 'methods', 'paths', 'expires']: - self.assertIn(field, content) - - self.assertEqual(expires_str, content['expires']) - self.assertEqual(data['methods'], content['methods']) - self.assertEqual(['/v2/queues/shared_queue/messages'], - content['paths']) - - def test_url_paths(self): - timeutils.set_time_override() - self.addCleanup(timeutils.clear_time_override) - - data = {'methods': ['GET', 'POST'], - 'paths': ['messages', 'subscriptions']} - response = self.simulate_post(self.signed_url_prefix, - body=jsonutils.dumps(data)) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - content = jsonutils.loads(response[0]) - - self.assertEqual( - ['/v2/queues/shared_queue/messages', - '/v2/queues/shared_queue/subscriptions'], - content['paths']) - - def test_url_bad_request(self): - self.simulate_post(self.signed_url_prefix, body='not json') - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - data = {'dummy': 'meh'} - self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - data = {'expires': 'wrong date format'} - self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - data = {'methods': 'methods not list'} - self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - data = {'paths': ['notallowed']} - self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_url_verification_success(self): - data = {'methods': ['GET', 'POST']} - response = self.simulate_post(self.signed_url_prefix, - body=jsonutils.dumps(data)) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - content = jsonutils.loads(response[0]) - - headers = { - 'URL-Signature': content['signature'], - 'URL-Expires': content['expires'], - 'URL-Methods': ','.join(content['methods']), - 'URL-Paths': ','.join(content['paths']) - } - headers.update(self.headers) - - response = self.simulate_get(content['paths'][0], headers=headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - def _get_msg_id(self, headers): - return self._get_msg_ids(headers)[0] - - def _get_msg_ids(self, headers): - return headers['location'].rsplit('=', 1)[-1].split(',') - - def test_url_verification_success_with_message_id(self): - doc = {'messages': [{'body': 239, 'ttl': 300}]} - body = jsonutils.dumps(doc) - self.simulate_post(self.url_prefix + '/queues/shared_queue/messages', - body=body, headers=self.headers) - msg_id = 
self._get_msg_id(self.srmock.headers_dict) - data = {'methods': ['GET', 'POST']} - response = self.simulate_post(self.signed_url_prefix, - body=jsonutils.dumps(data)) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - content = jsonutils.loads(response[0]) - - headers = { - 'URL-Signature': content['signature'], - 'URL-Expires': content['expires'], - 'URL-Methods': ','.join(content['methods']), - 'URL-Paths': ','.join(content['paths']) - } - headers.update(self.headers) - - self.simulate_get(content['paths'][0] + '/' + msg_id, - headers=headers) - self.assertEqual(falcon.HTTP_200, self.srmock.status) - - def test_url_verification_bad_request(self): - path = self.url_prefix + '/queues/shared_queue/messages' - expires = timeutils.utcnow() + datetime.timedelta(days=1) - expires_str = expires.strftime(urls._DATE_FORMAT) - - headers = { - 'URL-Signature': 'dummy', - 'URL-Expires': 'not a real date', - 'URL-Methods': 'GET,POST', - 'URL-Paths': '/v2/queues/shared_queue/messages' - } - headers.update(self.headers) - self.simulate_get(path, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - headers = { - 'URL-Signature': 'dummy', - 'URL-Expires': expires_str, - 'URL-Methods': '', - 'URL-Paths': '/v2/queues/shared_queue/messages' - } - headers.update(self.headers) - self.simulate_get(path, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - headers = { - 'URL-Signature': 'dummy', - 'URL-Expires': expires_str, - 'URL-Methods': 'nothing here', - 'URL-Paths': '/v2/queues/shared_queue/messages' - } - headers.update(self.headers) - self.simulate_get(path, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - headers = { - 'URL-Signature': 'dummy', - 'URL-Expires': expires_str, - 'URL-Methods': 'POST,PUT', - 'URL-Paths': '/v2/queues/shared_queue/messages' - } - headers.update(self.headers) - self.simulate_get(path, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - headers = { - 'URL-Signature': 'wrong signature', - 'URL-Expires': expires_str, - 'URL-Methods': 'GET,POST', - 'URL-Paths': '/v2/queues/shared_queue/messages' - } - headers.update(self.headers) - self.simulate_get(path, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - headers = { - 'URL-Signature': 'will fail because of the old date', - 'URL-Expires': '2015-01-01T00:00:00', - 'URL-Methods': 'GET,POST', - 'URL-Paths': '/v2/queues/shared_queue/messages' - } - headers.update(self.headers) - self.simulate_get(path, headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) - - def test_url_verification_bad_with_message_id(self): - doc = {'messages': [{'body': 239, 'ttl': 300}]} - body = jsonutils.dumps(doc) - self.simulate_post(self.url_prefix + '/queues/shared_queue/messages', - body=body, headers=self.headers) - msg_id = self._get_msg_id(self.srmock.headers_dict) - data = {'methods': ['GET', 'POST']} - response = self.simulate_post(self.signed_url_prefix, - body=jsonutils.dumps(data)) - - self.assertEqual(falcon.HTTP_200, self.srmock.status) - content = jsonutils.loads(response[0]) - - headers = { - 'URL-Signature': content['signature'], - 'URL-Expires': content['expires'], - 'URL-Methods': ','.join(content['methods']), - 'URL-Paths': ','.join('/queues/shared_queue/claims') - } - headers.update(self.headers) - - self.simulate_get(content['paths'][0] + '/' + msg_id, - headers=headers) - self.assertEqual(falcon.HTTP_404, self.srmock.status) diff --git 
a/zaqar/tests/unit/transport/wsgi/v2_0/test_validation.py b/zaqar/tests/unit/transport/wsgi/v2_0/test_validation.py deleted file mode 100644 index 23b9a4e4..00000000 --- a/zaqar/tests/unit/transport/wsgi/v2_0/test_validation.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json - -import falcon - -from oslo_utils import uuidutils -from zaqar.tests.unit.transport.wsgi import base - - -class TestValidation(base.V2Base): - - config_file = 'wsgi_mongodb_validation.conf' - - def setUp(self): - super(TestValidation, self).setUp() - - self.project_id = '7e55e1a7e' - - self.queue_path = self.url_prefix + '/queues/noein' - self.simulate_put(self.queue_path, self.project_id) - - self.headers = { - 'Client-ID': uuidutils.generate_uuid(), - } - - def tearDown(self): - self.simulate_delete(self.queue_path, self.project_id) - super(TestValidation, self).tearDown() - - def test_metadata_deserialization(self): - # Normal case - self.simulate_put(self.queue_path, - self.project_id, - body='{"timespace": "Shangri-la"}') - - self.assertEqual(falcon.HTTP_204, self.srmock.status) - - # Too long - max_queue_metadata = 64 - - doc_tmpl = '{{"Dragon Torc":"{0}"}}' - doc_tmpl_ws = '{{ "Dragon Torc" : "{0}" }}' # with whitespace - envelope_length = len(doc_tmpl.format('')) - - for tmpl in doc_tmpl, doc_tmpl_ws: - gen = '0' * (max_queue_metadata - envelope_length + 1) - doc = tmpl.format(gen) - self.simulate_put(self.queue_path, - self.project_id, - body=doc) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_message_deserialization(self): - # Normal case - body = '{"messages": [{"body": "Dragon Knights", "ttl": 100}]}' - self.simulate_post(self.queue_path + '/messages', - self.project_id, body=body, - headers=self.headers) - - self.assertEqual(falcon.HTTP_201, self.srmock.status) - - # Both messages' size are too long - max_messages_post_size = 256 - - obj = {'a': 0, 'b': ''} - envelope_length = len(json.dumps(obj, separators=(',', ':'))) - obj['b'] = 'x' * (max_messages_post_size - envelope_length + 1) - - for long_body in ('a' * (max_messages_post_size - 2 + 1), obj): - doc = json.dumps([{'body': long_body, 'ttl': 100}]) - self.simulate_post(self.queue_path + '/messages', - self.project_id, - body=doc, - headers=self.headers) - - self.assertEqual(falcon.HTTP_400, self.srmock.status) - - def test_request_without_client_id(self): - # No Client-ID in headers, it will raise 400 error. 
-        empty_headers = {}
-        self.simulate_put(self.queue_path,
-                          self.project_id,
-                          body='{"timespace": "Shangri-la"}',
-                          headers=empty_headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_subscription_ttl(self):
-        # Normal case
-        body = '{"subscriber": "http://trigger.she", "ttl": 100, "options":{}}'
-        self.simulate_post(self.queue_path + '/subscriptions',
-                           self.project_id, body=body,
-                           headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Very big TTL
-        body = ('{"subscriber": "http://a.c", "ttl": 99999999999999999'
-                ', "options":{}}')
-        self.simulate_post(self.queue_path + '/subscriptions',
-                           self.project_id, body=body,
-                           headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_queue_metadata_putting(self):
-        # Test _default_message_ttl
-        # TTL normal case
-        queue_1 = self.url_prefix + '/queues/queue1'
-        self.simulate_put(queue_1,
-                          self.project_id,
-                          body='{"_default_message_ttl": 60}')
-        self.addCleanup(self.simulate_delete, queue_1, self.project_id,
-                        headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # TTL under min
-        self.simulate_put(queue_1,
-                          self.project_id,
-                          body='{"_default_message_ttl": 59}')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # TTL over max
-        self.simulate_put(queue_1,
-                          self.project_id,
-                          body='{"_default_message_ttl": 1209601}')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Test _max_messages_post_size
-        # Size normal case
-        queue_2 = self.url_prefix + '/queues/queue2'
-        self.simulate_put(queue_2,
-                          self.project_id,
-                          body='{"_max_messages_post_size": 255}')
-        self.addCleanup(self.simulate_delete, queue_2, self.project_id,
-                        headers=self.headers)
-        self.assertEqual(falcon.HTTP_201, self.srmock.status)
-
-        # Size over max
-        self.simulate_put(queue_2,
-                          self.project_id,
-                          body='{"_max_messages_post_size": 257}')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-    def test_queue_patching(self):
-        headers = {
-            'Client-ID': uuidutils.generate_uuid(),
-            'Content-Type': "application/openstack-messaging-v2.0-json-patch"
-        }
-
-        # Wrong JSON pointer
-        self.simulate_patch(self.queue_path,
-                            self.project_id,
-                            headers=headers,
-                            body='[{"op":"add","path":"/a","value":2}]')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Wrong op
-        self.simulate_patch(self.queue_path,
-                            self.project_id,
-                            headers=headers,
-                            body='[{"op":"a","path":"/metadata/a","value":2}]')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        self.simulate_patch(self.queue_path,
-                            self.project_id,
-                            headers=headers,
-                            body='[{"op":"add","path":"/metadata/a",'
-                                 '"value":2}]')
-        self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
-    def test_queue_purge(self):
-        # Wrong key
-        queue_1 = self.url_prefix + '/queues/queue1/purge'
-        self.simulate_post(queue_1,
-                           self.project_id,
-                           body='{"wrong_key": ["messages"]}')
-        self.addCleanup(self.simulate_delete, queue_1, self.project_id,
-                        headers=self.headers)
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Wrong value
-        self.simulate_post(queue_1,
-                           self.project_id,
-                           body='{"resource_types": ["wrong_value"]}')
-        self.assertEqual(falcon.HTTP_400, self.srmock.status)
-
-        # Correct input
-        self.simulate_post(queue_1,
-                           self.project_id,
-                           body='{"resource_types": ["messages"]}')
-        self.assertEqual(falcon.HTTP_204, self.srmock.status)
diff --git a/zaqar/transport/__init__.py b/zaqar/transport/__init__.py
deleted file mode 100644
index 8d2bfde7..00000000
--- a/zaqar/transport/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed
under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Zaqar Transport Drivers""" - -from zaqar.transport import base - -# Hoist into package namespace -DriverBase = base.DriverBase diff --git a/zaqar/transport/acl.py b/zaqar/transport/acl.py deleted file mode 100644 index 377563d0..00000000 --- a/zaqar/transport/acl.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2015 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Policy enforcer of Zaqar""" - -import functools - -from oslo_policy import policy - -ENFORCER = None - - -def setup_policy(conf): - global ENFORCER - - ENFORCER = policy.Enforcer(conf) - - -def enforce(rule): - # Late import to prevent cycles - from zaqar.transport.wsgi import errors - - def decorator(func): - @functools.wraps(func) - def handler(*args, **kwargs): - ctx = args[1].env['zaqar.context'] - ENFORCER.enforce(rule, {}, ctx.to_dict(), do_raise=True, - exc=errors.HTTPForbidden) - - return func(*args, **kwargs) - return handler - - return decorator diff --git a/zaqar/transport/base.py b/zaqar/transport/base.py deleted file mode 100644 index c99c6f00..00000000 --- a/zaqar/transport/base.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -from oslo_config import cfg -import six - - -_GENERAL_TRANSPORT_OPTIONS = ( - cfg.StrOpt('auth_strategy', default='', - help=('Backend to use for authentication. ' - 'For no auth, keep it empty. ' - 'Existing strategies: keystone. 
' - 'See also the keystone_authtoken section below')), -) - -_RESOURCE_DEFAULTS = ( - cfg.IntOpt('default_message_ttl', default=3600, - help=('Defines how long a message will be accessible.')), - cfg.IntOpt('default_claim_ttl', default=300, - help=('Defines how long a message will be in claimed state.')), - cfg.IntOpt('default_claim_grace', default=60, - help=('Defines the message grace period in seconds.')), - cfg.IntOpt('default_subscription_ttl', default=3600, - help=('Defines how long a subscription will be available.')), -) - -_TRANSPORT_GROUP = 'transport' - - -def _config_options(): - return [ - (None, _GENERAL_TRANSPORT_OPTIONS), - (_TRANSPORT_GROUP, _RESOURCE_DEFAULTS), - ] - - -class ResourceDefaults(object): - """Registers and exposes defaults for resource fields.""" - - def __init__(self, conf): - self._conf = conf - self._conf.register_opts(_RESOURCE_DEFAULTS, group=_TRANSPORT_GROUP) - self._defaults = self._conf[_TRANSPORT_GROUP] - - @property - def message_ttl(self): - return self._defaults.default_message_ttl - - @property - def claim_ttl(self): - return self._defaults.default_claim_ttl - - @property - def claim_grace(self): - return self._defaults.default_claim_grace - - @property - def subscription_ttl(self): - return self._defaults.default_subscription_ttl - - -@six.add_metaclass(abc.ABCMeta) -class DriverBase(object): - """Base class for Transport Drivers to document the expected interface. - - :param conf: configuration instance - :type conf: oslo_config.cfg.CONF - :param storage: The storage driver - :type storage: zaqar.storage.base.DataDriverBase - :param cache: caching object - :type cache: dogpile.cache.region.CacheRegion - :param control: Storage driver to handle the control plane - :type control: zaqar.storage.base.ControlDriverBase - """ - - def __init__(self, conf, storage, cache, control): - self._conf = conf - self._storage = storage - self._cache = cache - self._control = control - - self._conf.register_opts(_GENERAL_TRANSPORT_OPTIONS) - self._defaults = ResourceDefaults(self._conf) - - @abc.abstractmethod - def listen(self): - """Start listening for client requests (self-hosting mode).""" - raise NotImplementedError diff --git a/zaqar/transport/middleware/__init__.py b/zaqar/transport/middleware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/zaqar/transport/middleware/auth.py b/zaqar/transport/middleware/auth.py deleted file mode 100644 index 009177cc..00000000 --- a/zaqar/transport/middleware/auth.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
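base.py above defines the contract every transport driver implements. As a sketch (not part of the original tree, written against the code as it existed before this removal), the smallest possible driver built on that interface could look like this; the EchoDriver name and the wsgiref server are illustrative assumptions.

from wsgiref import simple_server

from zaqar.transport import base  # the module shown above


class EchoDriver(base.DriverBase):
    """Toy self-hosted driver: answers every request with 200 OK."""

    def listen(self):
        # ResourceDefaults is already wired up by DriverBase.__init__,
        # so per-resource defaults are available here, e.g.:
        ttl = self._defaults.message_ttl  # 3600 with the stock config

        def app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [b'OK']

        simple_server.make_server('127.0.0.1', 8888, app).serve_forever()

Instantiating it still requires the conf, storage, cache, and control objects documented on DriverBase above.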
- -"""Middleware for handling authorization and authentication.""" - -from keystonemiddleware import auth_token -from oslo_log import log - - -STRATEGIES = {} - -LOG = log.getLogger(__name__) - - -class SignedHeadersAuth(object): - - def __init__(self, app, auth_app): - self._app = app - self._auth_app = auth_app - - def __call__(self, environ, start_response): - path = environ.get('PATH_INFO') - # NOTE(flwang): The root path of Zaqar service shouldn't require any - # auth. - if path == '/': - return self._app(environ, start_response) - - signature = environ.get('HTTP_URL_SIGNATURE') - - if signature is None or path.startswith('/v1'): - return self._auth_app(environ, start_response) - - return self._app(environ, start_response) - - -class KeystoneAuth(object): - - @classmethod - def install(cls, app, conf): - """Install Auth check on application.""" - LOG.debug(u'Installing Keystone\'s auth protocol') - - return auth_token.AuthProtocol(app, - conf={"oslo-config-config": conf, - "oslo-config-project": "zaqar"}) - - -STRATEGIES['keystone'] = KeystoneAuth - - -def strategy(strategy): - """Returns the Auth Strategy. - - :param strategy: String representing - the strategy to use - """ - try: - return STRATEGIES[strategy] - except KeyError: - raise RuntimeError diff --git a/zaqar/transport/middleware/cors.py b/zaqar/transport/middleware/cors.py deleted file mode 100644 index b48254a7..00000000 --- a/zaqar/transport/middleware/cors.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2017 OpenStack, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import six.moves.urllib.parse as urlparse -import webob - -from oslo_log import log -from oslo_middleware import cors - -LOG = log.getLogger(__name__) - - -class Response(webob.Response): - - def __call__(self, environ, start_response): - """WSGI application interface""" - - if self.conditional_response: - return self.conditional_response_app(environ, start_response) - headerlist = self._abs_headerlist(environ) - start_response(self.status, headerlist) - if environ['REQUEST_METHOD'] == 'HEAD': - # Special case here... - # NOTE(wangxiyuan): webob.response.Response always return - # EmptyResponse here. This behavior breaks backward-compatibility. - # so we need to 'fix' it here manually. - return [] - return self._app_iter - - def _abs_headerlist(self, *args, **kwargs): - headerlist = super(Response, self)._abs_headerlist(*args, **kwargs) - - # NOTE(wangxiyuan): webob.response.Response always convert relative - # path to absolute path given the request environ on location field in - # the header of response. This behavior breaks backward-compatibility. - # so we need to 'fix' it here manually. 
- for i, (name, value) in enumerate(headerlist): - if name.lower() == 'location': - loc = urlparse.urlparse(value) - relative_path = value[value.index(loc.path):] - headerlist[i] = (name, relative_path) - break - - return headerlist - - -class Request(webob.Request): - - ResponseClass = Response - - -class CORSMiddleware(object): - - def __init__(self, app, auth_app, conf): - self._app = cors.CORS(app, conf) - - # We don't auth here. It's just used for keeping consistence. - self._auth_app = auth_app - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - return self._app(request) - - @classmethod - def install(cls, app, auth_app, conf): - - LOG.debug(u'Installing CORS middleware.') - cors.set_defaults( - allow_headers=['X-Auth-Token', - 'X-Identity-Status', - 'X-Roles', - 'X-Service-Catalog', - 'X-User-Id', - 'X-Tenant-Id', - 'X-OpenStack-Request-ID', - 'X-Trace-Info', - 'X-Trace-HMAC', - 'Client-id'], - expose_headers=['X-Auth-Token', - 'X-Subject-Token', - 'X-Service-Token', - 'X-OpenStack-Request-ID'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH', - 'HEAD'] - ) - return CORSMiddleware(app, auth_app, conf) - - -def install_cors(app, auth_app, conf): - return CORSMiddleware.install(app, auth_app, conf) diff --git a/zaqar/transport/middleware/profile.py b/zaqar/transport/middleware/profile.py deleted file mode 100644 index b9aacbd8..00000000 --- a/zaqar/transport/middleware/profile.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2016 OpenStack, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six -import six.moves.urllib.parse as urlparse -import webob - -from oslo_log import log -from osprofiler import _utils as utils -from osprofiler import notifier -from osprofiler import profiler -from osprofiler import web - -LOG = log.getLogger(__name__) - - -def setup(conf, binary, host): - if conf.profiler.enabled: - - # Note(wangxiyuan): OSprofiler now support some kind of backends, such - # as Ceilometer, ElasticSearch, Messaging and MongoDB. - # 1. Ceilometer is only used for data collection, and Messaging is only - # used for data transfer. So Ceilometer only works when Messaging is - # enabled. - # 2. ElasticSearch and MongoDB support both data collection and - # transfer. So they can be used standalone. - # 3. Choose which backend depends on the config option - # "connection_string" , and the default value is "messaging://". 
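-        # For example: "messaging://" (the default), "mongodb://localhost:27017",
-        # or "elasticsearch://127.0.0.1:9200"; the URI scheme selects the backend.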
- backend_uri = conf.profiler.connection_string - if "://" not in backend_uri: - backend_uri += "://" - parsed_connection = urlparse.urlparse(backend_uri) - backend_type = parsed_connection.scheme - if backend_type == "messaging": - import oslo_messaging - _notifier = notifier.create( - backend_uri, oslo_messaging, {}, - oslo_messaging.get_notification_transport(conf), - "Zaqar", binary, host) - else: - _notifier = notifier.create(backend_uri, project="Zaqar", - service=binary, host=host) - notifier.set(_notifier) - LOG.warning("OSProfiler is enabled.\nIt means that person who " - "knows any of hmac_keys that are specified in " - "/etc/zaqar/zaqar.conf can trace his requests. \n In " - "real life only operator can read this file so there " - "is no security issue. Note that even if person can " - "trigger profiler, only admin user can retrieve trace " - "information.\n" - "To disable OSprofiler set in zaqar.conf:\n" - "[profiler]\nenabled=false") - web.enable(conf.profiler.hmac_keys) - else: - web.disable() - - -class ProfileWSGIMiddleware(object): - - def __init__(self, application, hmac_keys=None, enabled=False): - self.application = application - self.name = "wsgi" - self.enabled = enabled - self.hmac_keys = utils.split(hmac_keys or "") - - def _trace_is_valid(self, trace_info): - if not isinstance(trace_info, dict): - return False - trace_keys = set(six.iterkeys(trace_info)) - if not all(k in trace_keys for k in web._REQUIRED_KEYS): - return False - if trace_keys.difference(web._REQUIRED_KEYS + web._OPTIONAL_KEYS): - return False - return True - - def __call__(self, environ, start_response): - request = webob.Request(environ) - trace_info = utils.signed_unpack(request.headers.get(web.X_TRACE_INFO), - request.headers.get(web.X_TRACE_HMAC), - self.hmac_keys) - - if not self._trace_is_valid(trace_info): - return self.application(environ, start_response) - - profiler.init(**trace_info) - info = { - "request": { - "path": request.path, - "query": request.query_string, - "method": request.method, - "scheme": request.scheme - } - } - with profiler.Trace(self.name, info=info): - return self.application(environ, start_response) - - -def install_wsgi_tracer(app, conf): - enabled = conf.profiler.enabled and conf.profiler.trace_wsgi_transport - - if enabled: - LOG.debug(u'Installing osprofiler\'s wsgi tracer') - - return ProfileWSGIMiddleware(app, conf.profiler.hmac_keys, enabled=enabled) diff --git a/zaqar/transport/utils.py b/zaqar/transport/utils.py deleted file mode 100644 index ce69181f..00000000 --- a/zaqar/transport/utils.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
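To complement the tracer above: a sketch of how a caller could produce the X-Trace-Info / X-Trace-HMAC headers that ProfileWSGIMiddleware unpacks. It leans on the same private osprofiler helper the middleware itself imports; the key and ids are placeholders, and the helper's exact signature should be checked against the installed osprofiler release.

from osprofiler import _utils as utils  # same helper the middleware uses

# 'SECRET_KEY' must match one of the server's [profiler] hmac_keys.
trace_info = {'base_id': '8b2c-placeholder', 'parent_id': '91d0-placeholder'}
packed, mac = utils.signed_pack(trace_info, 'SECRET_KEY')

headers = {'X-Trace-Info': packed, 'X-Trace-HMAC': mac}
# Attach `headers` to any HTTP request against the traced WSGI app.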
- -import json - -from oslo_utils import encodeutils - - -class MalformedJSON(ValueError): - """JSON string is not valid.""" - pass - - -class OverflowedJSONInteger(OverflowError): - """JSON integer is too large.""" - pass - - -def _json_int(s): - """Parse a string as a base 10 64-bit signed integer.""" - i = int(s) - if not (int(-2 ** 63) <= i <= int(2 ** 63 - 1)): - raise OverflowedJSONInteger() - - return i - - -def read_json(stream, len): - """Like json.load, but converts ValueError to MalformedJSON upon failure. - - :param stream: a file-like object - :param len: the number of bytes to read from stream - """ - try: - content = encodeutils.safe_decode(stream.read(len), 'utf-8') - return json.loads(content, parse_int=_json_int) - except UnicodeDecodeError as ex: - raise MalformedJSON(ex) - except ValueError as ex: - raise MalformedJSON(ex) - - -def to_json(obj): - """Like json.dumps, but outputs a UTF-8 encoded string. - - :param obj: a JSON-serializable object - """ - return json.dumps(obj, ensure_ascii=False) diff --git a/zaqar/transport/validation.py b/zaqar/transport/validation.py deleted file mode 100644 index 45f64bf4..00000000 --- a/zaqar/transport/validation.py +++ /dev/null @@ -1,638 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# Copyright (c) 2015 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
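The read_json() helper above rejects out-of-range integers by handing a parse_int hook to json.loads. A self-contained illustration of that mechanism, re-implemented here so it runs without Zaqar:

import json


def _json_int(s):
    """Reject integers outside the signed 64-bit range at parse time."""
    i = int(s)
    if not (-2 ** 63 <= i <= 2 ** 63 - 1):
        raise OverflowError('JSON integer is too large.')
    return i


print(json.loads('{"ttl": 300}', parse_int=_json_int))  # {'ttl': 300}

try:
    json.loads('{"ttl": 99999999999999999999}', parse_int=_json_int)
except OverflowError as exc:
    print(exc)  # JSON integer is too large.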
- -import datetime -import re - -from oslo_config import cfg -from oslo_utils import timeutils -import six - -from zaqar.i18n import _ - -MIN_MESSAGE_TTL = 60 -MIN_CLAIM_TTL = 60 -MIN_CLAIM_GRACE = 60 -MIN_SUBSCRIPTION_TTL = 60 -_PURGBLE_RESOURCE_TYPES = {'messages', 'subscriptions'} - -_TRANSPORT_LIMITS_OPTIONS = ( - cfg.IntOpt('max_queues_per_page', default=20, - deprecated_name='queue_paging_uplimit', - deprecated_group='limits:transport', - help='Defines the maximum number of queues per page.'), - - cfg.IntOpt('max_messages_per_page', default=20, - deprecated_name='message_paging_uplimit', - deprecated_group='limits:transport', - help='Defines the maximum number of messages per page.'), - - cfg.IntOpt('max_subscriptions_per_page', default=20, - deprecated_name='subscription_paging_uplimit', - deprecated_group='limits:transport', - help='Defines the maximum number of subscriptions per page.'), - - cfg.IntOpt('max_messages_per_claim_or_pop', default=20, - deprecated_name='max_messages_per_claim', - help='The maximum number of messages that can be claimed (OR) ' - 'popped in a single request'), - - cfg.IntOpt('max_queue_metadata', default=64 * 1024, - deprecated_name='metadata_size_uplimit', - deprecated_group='limits:transport', - help='Defines the maximum amount of metadata in a queue.'), - - cfg.IntOpt('max_messages_post_size', default=256 * 1024, - deprecated_name='message_size_uplimit', - deprecated_group='limits:transport', - deprecated_opts=[cfg.DeprecatedOpt('max_message_size')], - help='Defines the maximum size of message posts.'), - - cfg.IntOpt('max_message_ttl', default=1209600, - deprecated_name='message_ttl_max', - deprecated_group='limits:transport', - help='Maximum amount of time a message will be available.'), - - cfg.IntOpt('max_claim_ttl', default=43200, - deprecated_name='claim_ttl_max', - deprecated_group='limits:transport', - help='Maximum length of a message in claimed state.'), - - cfg.IntOpt('max_claim_grace', default=43200, - deprecated_name='claim_grace_max', - deprecated_group='limits:transport', - help='Defines the maximum message grace period in seconds.'), - - cfg.ListOpt('subscriber_types', default=['http', 'https', 'mailto', - 'trust+http', 'trust+https'], - help='Defines supported subscriber types.'), - - cfg.IntOpt('max_flavors_per_page', default=20, - help='Defines the maximum number of flavors per page.'), - - cfg.IntOpt('max_pools_per_page', default=20, - help='Defines the maximum number of pools per page.'), -) - -_TRANSPORT_LIMITS_GROUP = 'transport' - -# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match -# only ASCII characters. -QUEUE_NAME_REGEX = re.compile('^[a-zA-Z0-9_\-.]+$') -QUEUE_NAME_MAX_LEN = 64 -PROJECT_ID_MAX_LEN = 256 - - -def _config_options(): - return [(_TRANSPORT_LIMITS_GROUP, _TRANSPORT_LIMITS_OPTIONS)] - - -class ValidationFailed(ValueError): - """User input did not follow API restrictions.""" - - def __init__(self, msg, *args, **kwargs): - msg = msg.format(*args, **kwargs) - super(ValidationFailed, self).__init__(msg) - - -class Validator(object): - def __init__(self, conf): - self._conf = conf - self._conf.register_opts(_TRANSPORT_LIMITS_OPTIONS, - group=_TRANSPORT_LIMITS_GROUP) - self._limits_conf = self._conf[_TRANSPORT_LIMITS_GROUP] - self._supported_operations = ('add', 'remove', 'replace') - - def queue_identification(self, queue, project): - """Restrictions on a project id & queue name pair. 
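-        For example, 'order-queue_7.v2' is a valid name, while 'no/slashes'
-        and any name longer than 64 characters are rejected.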
- - :param queue: Name of the queue - :param project: Project id - :raises ValidationFailed: if the `name` is longer than 64 - characters or contains anything other than ASCII digits and - letters, underscores, and dashes. Also raises if `project` - is not None but longer than 256 characters. - """ - - if project is not None and len(project) > PROJECT_ID_MAX_LEN: - msg = _(u'Project ids may not be more than {0} characters long.') - raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) - - if len(queue) > QUEUE_NAME_MAX_LEN: - msg = _(u'Queue names may not be more than {0} characters long.') - raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) - - if not QUEUE_NAME_REGEX.match(queue): - raise ValidationFailed( - _(u'Queue names may only contain ASCII letters, digits, ' - 'underscores, and dashes.')) - - def _get_change_operation_d10(self, raw_change): - op = raw_change.get('op') - if op is None: - msg = (_('Unable to find `op` in JSON Schema change. ' - 'It must be one of the following: %(available)s.') % - {'available': ', '.join(self._supported_operations)}) - raise ValidationFailed(msg) - if op not in self._supported_operations: - msg = (_('Invalid operation: `%(op)s`. ' - 'It must be one of the following: %(available)s.') % - {'op': op, - 'available': ', '.join(self._supported_operations)}) - raise ValidationFailed(msg) - return op - - def _get_change_path_d10(self, raw_change): - try: - return raw_change['path'] - except KeyError: - msg = _("Unable to find '%s' in JSON Schema change") % 'path' - raise ValidationFailed(msg) - - def _decode_json_pointer(self, pointer): - """Parse a json pointer. - - Json Pointers are defined in - http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . - The pointers use '/' for separation between object attributes, such - that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character - in an attribute name is encoded as "~1" and a '~' character is encoded - as "~0". - """ - self._validate_json_pointer(pointer) - ret = [] - for part in pointer.lstrip('/').split('/'): - ret.append(part.replace('~1', '/').replace('~0', '~').strip()) - return ret - - def _validate_json_pointer(self, pointer): - """Validate a json pointer. - - We only accept a limited form of json pointers. 
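-        For example, '/metadata/ttl' is accepted, while 'metadata/ttl'
-        (no leading slash) and '/metadata//ttl' (adjacent slashes) are not.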
- """ - if not pointer.startswith('/'): - msg = _('Pointer `%s` does not start with "/".') % pointer - raise ValidationFailed(msg) - if re.search('/\s*?/', pointer[1:]): - msg = _('Pointer `%s` contains adjacent "/".') % pointer - raise ValidationFailed(msg) - if len(pointer) > 1 and pointer.endswith('/'): - msg = _('Pointer `%s` end with "/".') % pointer - raise ValidationFailed(msg) - if pointer[1:].strip() == '/': - msg = _('Pointer `%s` does not contains valid token.') % pointer - raise ValidationFailed(msg) - if re.search('~[^01]', pointer) or pointer.endswith('~'): - msg = _('Pointer `%s` contains "~" not part of' - ' a recognized escape sequence.') % pointer - raise ValidationFailed(msg) - - def _get_change_value(self, raw_change, op): - if 'value' not in raw_change: - msg = _('Operation "{0}" requires a member named "value".') - raise ValidationFailed(msg, op) - return raw_change['value'] - - def _validate_change(self, change): - if change['op'] == 'remove': - return - path_root = change['path'][0] - if len(change['path']) >= 1 and path_root.lower() != 'metadata': - msg = _("The root of path must be metadata, e.g /metadata/key.") - raise ValidationFailed(msg) - - def _validate_path(self, op, path): - limits = {'add': 2, 'remove': 2, 'replace': 2} - if len(path) != limits.get(op, 2): - msg = _("Invalid JSON pointer for this resource: " - "'/%s, e.g /metadata/key'") % '/'.join(path) - raise ValidationFailed(msg) - - def _parse_json_schema_change(self, raw_change, draft_version): - if draft_version == 10: - op = self._get_change_operation_d10(raw_change) - path = self._get_change_path_d10(raw_change) - else: - msg = _('Unrecognized JSON Schema draft version') - raise ValidationFailed(msg) - - path_list = self._decode_json_pointer(path) - return op, path_list - - def queue_patching(self, request, changes): - washed_changes = [] - content_types = { - 'application/openstack-messaging-v2.0-json-patch': 10, - } - - json_schema_version = content_types[request.content_type] - - if not isinstance(changes, list): - msg = _('Request body must be a JSON array of operation objects.') - raise ValidationFailed(msg) - - for raw_change in changes: - if not isinstance(raw_change, dict): - msg = _('Operations must be JSON objects.') - raise ValidationFailed(msg) - - (op, path) = self._parse_json_schema_change(raw_change, - json_schema_version) - - # NOTE(flwang): Now the 'path' is a list. - self._validate_path(op, path) - change = {'op': op, 'path': path, - 'json_schema_version': json_schema_version} - - if not op == 'remove': - change['value'] = self._get_change_value(raw_change, op) - - self._validate_change(change) - - washed_changes.append(change) - - return washed_changes - - def queue_listing(self, limit=None, **kwargs): - """Restrictions involving a list of queues. - - :param limit: The expected number of queues in the list - :param kwargs: Ignored arguments passed to storage API - :raises ValidationFailed: if the limit is exceeded - """ - - uplimit = self._limits_conf.max_queues_per_page - if limit is not None and not (0 < limit <= uplimit): - msg = _(u'Limit must be at least 1 and no greater than {0}.') - raise ValidationFailed(msg, self._limits_conf.max_queues_per_page) - - def queue_metadata_length(self, content_length): - """Restrictions on queue's length. - - :param content_length: Queue request's length. - :raises ValidationFailed: if the metadata is oversize. 
- """ - if content_length is None: - return - if content_length > self._limits_conf.max_queue_metadata: - msg = _(u'Queue metadata is too large. Max size: {0}') - raise ValidationFailed(msg, self._limits_conf.max_queue_metadata) - - def queue_metadata_putting(self, queue_metadata): - """Checking if the reserved attributes of the queue are valid. - - :param queue_metadata: Queue's metadata. - :raises ValidationFailed: if any reserved attribute is invalid. - """ - if not queue_metadata: - return - - queue_default_ttl = queue_metadata.get('_default_message_ttl') - if queue_default_ttl and not isinstance(queue_default_ttl, int): - msg = _(u'_default_message_ttl must be integer.') - raise ValidationFailed(msg) - - if queue_default_ttl: - if not (MIN_MESSAGE_TTL <= queue_default_ttl <= - self._limits_conf.max_message_ttl): - msg = _(u'_default_message_ttl can not exceed {0} ' - 'seconds, and must be at least {1} seconds long.') - raise ValidationFailed( - msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) - - queue_max_msg_size = queue_metadata.get('_max_messages_post_size', - None) - if queue_max_msg_size and not isinstance(queue_max_msg_size, int): - msg = _(u'_max_messages_post_size must be integer.') - raise ValidationFailed(msg) - - if queue_max_msg_size: - if not (0 < queue_max_msg_size <= - self._limits_conf.max_messages_post_size): - raise ValidationFailed( - _(u'_max_messages_post_size can not exceed {0}, ' - ' and must be at least greater than 0.'), - self._limits_conf.max_messages_post_size) - - max_claim_count = queue_metadata.get('_max_claim_count', None) - if max_claim_count and not isinstance(max_claim_count, int): - msg = _(u'_max_claim_count must be integer.') - raise ValidationFailed(msg) - - dlq_ttl = queue_metadata.get('_dead_letter_queue_messages_ttl', None) - if dlq_ttl and not isinstance(dlq_ttl, int): - msg = _(u'_dead_letter_queue_messages_ttl must be integer.') - raise ValidationFailed(msg) - - if not (MIN_MESSAGE_TTL <= dlq_ttl <= - self._limits_conf.max_message_ttl): - msg = _(u'The TTL for a message may not exceed {0} seconds, ' - 'and must be at least {1} seconds long.') - - def queue_purging(self, document): - """Restrictions the resource types to be purged for a queue. - - :param resource_types: Type list of all resource under a queue - :raises ValidationFailed: if the resource types are invalid - """ - - if 'resource_types' not in document: - msg = _(u'Post body must contain key "resource_types".') - raise ValidationFailed(msg) - - if (not set(document['resource_types']).issubset( - _PURGBLE_RESOURCE_TYPES)): - msg = _(u'Resource types must be a sub set of {0}.') - raise ValidationFailed(msg, _PURGBLE_RESOURCE_TYPES) - - def message_posting(self, messages): - """Restrictions on a list of messages. - - :param messages: A list of messages - :raises ValidationFailed: if any message has a out-of-range - TTL. - """ - - if not messages: - raise ValidationFailed(_(u'No messages to enqueu.')) - - for msg in messages: - self.message_content(msg) - - def message_length(self, content_length, max_msg_post_size=None): - """Restrictions on message post length. - - :param content_length: Queue request's length. - :raises ValidationFailed: if the metadata is oversize. - """ - if content_length is None: - return - - if max_msg_post_size: - try: - min_max_size = min(max_msg_post_size, - self._limits_conf.max_messages_post_size) - if content_length > min_max_size: - raise ValidationFailed( - _(u'Message collection size is too large. The max ' - 'size for current queue is {0}. 
It is calculated ' - 'by max size = min(max_messages_post_size_config: ' - '{1}, max_messages_post_size_queue: {2}).'), - min_max_size, - self._limits_conf.max_messages_post_size, - max_msg_post_size) - except TypeError: - # NOTE(flwang): If there is a type error when using min(), - # it only happens in py3.x, it will be skipped and compare - # the message length with the size defined in config file. - pass - - if content_length > self._limits_conf.max_messages_post_size: - raise ValidationFailed( - _(u'Message collection size is too large. Max size {0}'), - self._limits_conf.max_messages_post_size) - - def message_content(self, message): - """Restrictions on each message.""" - - ttl = message['ttl'] - - if not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl): - msg = _(u'The TTL for a message may not exceed {0} seconds, and ' - 'must be at least {1} seconds long.') - - raise ValidationFailed( - msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) - - def message_listing(self, limit=None, **kwargs): - """Restrictions involving a list of messages. - - :param limit: The expected number of messages in the list - :param kwargs: Ignored arguments passed to storage API - :raises ValidationFailed: if the limit is exceeded - """ - - uplimit = self._limits_conf.max_messages_per_page - if limit is not None and not (0 < limit <= uplimit): - msg = _(u'Limit must be at least 1 and may not ' - 'be greater than {0}.') - - raise ValidationFailed( - msg, self._limits_conf.max_messages_per_page) - - def message_deletion(self, ids=None, pop=None): - """Restrictions involving deletion of messages. - - :param ids: message ids passed in by the delete request - :param pop: count of messages to be POPped - :raises ValidationFailed: if, - pop AND id params are present together - neither pop or id params are present - message count to be popped > maximum allowed - """ - - if pop is not None and ids is not None: - msg = _(u'pop and id params cannot be present together in the ' - 'delete request.') - - raise ValidationFailed(msg) - - if pop is None and ids is None: - msg = _(u'The request should have either "ids" or "pop" ' - 'parameter in the request, to be able to delete.') - - raise ValidationFailed(msg) - - pop_uplimit = self._limits_conf.max_messages_per_claim_or_pop - if pop is not None and not (0 < pop <= pop_uplimit): - msg = _(u'Pop value must be at least 1 and may not ' - 'be greater than {0}.') - - raise ValidationFailed(msg, pop_uplimit) - - delete_uplimit = self._limits_conf.max_messages_per_page - if ids is not None and not (0 < len(ids) <= delete_uplimit): - msg = _(u'ids parameter should have at least 1 and not ' - 'greater than {0} values.') - - raise ValidationFailed(msg, delete_uplimit) - - def claim_creation(self, metadata, limit=None): - """Restrictions on the claim parameters upon creation. - - :param metadata: The claim metadata - :param limit: The number of messages to claim - :raises ValidationFailed: if either TTL or grace is out of range, - or the expected number of messages exceed the limit. 
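-        With the default limits, one claim may grab at most 20 messages,
-        and both TTL and grace must fall between 60 and 43200 seconds.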
- """ - - self.claim_updating(metadata) - - uplimit = self._limits_conf.max_messages_per_claim_or_pop - if limit is not None and not (0 < limit <= uplimit): - msg = _(u'Limit must be at least 1 and may not ' - 'be greater than {0}.') - - raise ValidationFailed( - msg, self._limits_conf.max_messages_per_claim_or_pop) - - grace = metadata['grace'] - - if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace): - msg = _(u'The grace for a claim may not exceed {0} seconds, and ' - 'must be at least {1} seconds long.') - - raise ValidationFailed( - msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE) - - def claim_updating(self, metadata): - """Restrictions on the claim TTL. - - :param metadata: The claim metadata - :raises ValidationFailed: if the TTL is out of range - """ - - ttl = metadata['ttl'] - - if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl): - msg = _(u'The TTL for a claim may not exceed {0} seconds, and ' - 'must be at least {1} seconds long.') - - raise ValidationFailed( - msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL) - - def subscription_posting(self, subscription): - """Restrictions on a creation of subscription. - - :param subscription: dict of subscription - :raises ValidationFailed: if the subscription is invalid. - """ - for p in ('subscriber',): - if p not in subscription.keys(): - raise ValidationFailed(_(u'Missing parameter %s in body.') % p) - - self.subscription_patching(subscription) - - def subscription_patching(self, subscription): - """Restrictions on an update of subscription. - - :param subscription: dict of subscription - :raises ValidationFailed: if the subscription is invalid. - """ - - if not subscription: - raise ValidationFailed(_(u'No subscription to create.')) - - if not isinstance(subscription, dict): - msg = _('Subscriptions must be a dict.') - raise ValidationFailed(msg) - - subscriber = subscription.get('subscriber') - subscriber_type = None - - if subscriber: - parsed_uri = six.moves.urllib_parse.urlparse(subscriber) - subscriber_type = parsed_uri.scheme - - if subscriber_type not in self._limits_conf.subscriber_types: - msg = _(u'The subscriber type of subscription must be ' - u'supported in the list {0}.') - raise ValidationFailed(msg, self._limits_conf.subscriber_types) - - options = subscription.get('options') - if options and not isinstance(options, dict): - msg = _(u'Options must be a dict.') - raise ValidationFailed(msg) - - ttl = subscription.get('ttl') - if ttl: - if not isinstance(ttl, int): - msg = _(u'TTL must be an integer.') - raise ValidationFailed(msg) - - if ttl < MIN_SUBSCRIPTION_TTL: - msg = _(u'The TTL for a subscription ' - 'must be at least {0} seconds long.') - raise ValidationFailed(msg, MIN_SUBSCRIPTION_TTL) - - # NOTE(flwang): By this change, technically, user can set a very - # big TTL so as to get a very long subscription. - now = timeutils.utcnow_ts() - now_dt = datetime.datetime.utcfromtimestamp(now) - msg = _(u'The TTL seconds for a subscription plus current time' - ' must be less than {0}.') - try: - # NOTE(flwang): If below expression works, then we believe the - # ttl is acceptable otherwise it exceeds the max time of - # python. 
- now_dt + datetime.timedelta(seconds=ttl) - except OverflowError: - raise ValidationFailed(msg, datetime.datetime.max) - - def subscription_confirming(self, confirmed): - confirmed = confirmed.get('confirmed') - if not isinstance(confirmed, bool): - msg = _(u"The 'confirmed' should be boolean.") - raise ValidationFailed(msg) - - def subscription_listing(self, limit=None, **kwargs): - """Restrictions involving a list of subscriptions. - - :param limit: The expected number of subscriptions in the list - :param kwargs: Ignored arguments passed to storage API - :raises ValidationFailed: if the limit is exceeded - """ - - uplimit = self._limits_conf.max_subscriptions_per_page - if limit is not None and not (0 < limit <= uplimit): - msg = _(u'Limit must be at least 1 and may not ' - 'be greater than {0}.') - - raise ValidationFailed( - msg, self._limits_conf.max_subscriptions_per_page) - - def get_limit_conf_value(self, limit_conf_name=None): - """Return the value of limit configuration. - - :param limit_conf_name: configuration name - """ - return self._limits_conf[limit_conf_name] - - def flavor_listing(self, limit=None, **kwargs): - """Restrictions involving a list of pools. - - :param limit: The expected number of flavors in the list - :param kwargs: Ignored arguments passed to storage API - :raises ValidationFailed: if the limit is exceeded - """ - - uplimit = self._limits_conf.max_flavors_per_page - if limit is not None and not (0 < limit <= uplimit): - msg = _(u'Limit must be at least 1 and no greater than {0}.') - raise ValidationFailed(msg, self._limits_conf.max_flavors_per_page) - - def pool_listing(self, limit=None, **kwargs): - """Restrictions involving a list of pools. - - :param limit: The expected number of flavors in the list - :param kwargs: Ignored arguments passed to storage API - :raises ValidationFailed: if the limit is exceeded - """ - - uplimit = self._limits_conf.max_pools_per_page - if limit is not None and not (0 < limit <= uplimit): - msg = _(u'Limit must be at least 1 and no greater than {0}.') - raise ValidationFailed(msg, self._limits_conf.max_pools_per_page) diff --git a/zaqar/transport/websocket/__init__.py b/zaqar/transport/websocket/__init__.py deleted file mode 100644 index 92959c51..00000000 --- a/zaqar/transport/websocket/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Websocket Transport Driver""" - -from zaqar.transport.websocket import driver - -# Hoist into package namespace -Driver = driver.Driver diff --git a/zaqar/transport/websocket/driver.py b/zaqar/transport/websocket/driver.py deleted file mode 100644 index 681058b4..00000000 --- a/zaqar/transport/websocket/driver.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import socket - -from oslo_config import cfg -from oslo_log import log as logging - -try: - import asyncio -except ImportError: - import trollius as asyncio - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.transport import base -from zaqar.transport.middleware import auth -from zaqar.transport.websocket import factory - - -_WS_OPTIONS = ( - cfg.HostAddressOpt('bind', default='127.0.0.1', - help='Address on which the self-hosting server will ' - 'listen.'), - - cfg.PortOpt('port', default=9000, - help='Port on which the self-hosting server will listen.'), - - cfg.PortOpt('external-port', - help='Port on which the service is provided to the user.'), - - cfg.HostAddressOpt('notification-bind', - help='Address on which the notification server will ' - 'listen.'), - - cfg.PortOpt('notification-port', default=0, - help='Port on which the notification server will listen.'), - -) - -_WS_GROUP = 'drivers:transport:websocket' - -LOG = logging.getLogger(__name__) - - -def _config_options(): - return [(_WS_GROUP, _WS_OPTIONS)] - - -class Driver(base.DriverBase): - - def __init__(self, conf, api, cache): - super(Driver, self).__init__(conf, None, None, None) - self._api = api - self._cache = cache - - self._conf.register_opts(_WS_OPTIONS, group=_WS_GROUP) - self._ws_conf = self._conf[_WS_GROUP] - - if self._conf.auth_strategy: - auth_strategy = auth.strategy(self._conf.auth_strategy) - self._auth_strategy = lambda app: auth_strategy.install( - app, self._conf) - else: - self._auth_strategy = None - - @decorators.lazy_property(write=False) - def factory(self): - uri = 'ws://' + self._ws_conf.bind + ':' + str(self._ws_conf.port) - return factory.ProtocolFactory( - uri, - handler=self._api, - external_port=self._ws_conf.external_port, - auth_strategy=self._auth_strategy, - loop=asyncio.get_event_loop(), - secret_key=self._conf.signed_url.secret_key) - - @decorators.lazy_property(write=False) - def notification_factory(self): - return factory.NotificationFactory(self.factory) - - def listen(self): - """Self-host the WebSocket server. - - It runs the WebSocket server using 'bind' and 'port' options from the - websocket config group, and the notifiton endpoint using the - 'notification_bind' and 'notification_port' options. 
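-        With the default options, the API is served on ws://127.0.0.1:9000.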
- """ - msgtmpl = _(u'Serving on host %(bind)s:%(port)s') - LOG.info(msgtmpl, - {'bind': self._ws_conf.bind, 'port': self._ws_conf.port}) - - loop = asyncio.get_event_loop() - coro_notification = loop.create_server( - self.notification_factory, - self._ws_conf.notification_bind, - self._ws_conf.notification_port) - coro = loop.create_server( - self.factory, - self._ws_conf.bind, - self._ws_conf.port) - - def got_server(task): - # Retrieve the port number of the listening socket - port = task.result().sockets[0].getsockname()[1] - if self._ws_conf.notification_bind is not None: - host = self._ws_conf.notification_bind - else: - host = socket.gethostname() - self.notification_factory.set_subscription_url( - 'http://%s:%s/' % (host, port)) - self._api.set_subscription_factory(self.notification_factory) - - task = asyncio.Task(coro_notification) - task.add_done_callback(got_server) - - loop.run_until_complete(asyncio.wait([coro, task])) - - try: - loop.run_forever() - except KeyboardInterrupt: - pass - finally: - loop.close() diff --git a/zaqar/transport/websocket/factory.py b/zaqar/transport/websocket/factory.py deleted file mode 100644 index 19307300..00000000 --- a/zaqar/transport/websocket/factory.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json - -from autobahn.asyncio import websocket -import msgpack -from oslo_utils import uuidutils - -from zaqar.transport.websocket import protocol - - -class ProtocolFactory(websocket.WebSocketServerFactory): - - protocol = protocol.MessagingProtocol - - def __init__(self, uri, handler, external_port, auth_strategy, - loop, secret_key): - websocket.WebSocketServerFactory.__init__( - self, url=uri, externalPort=external_port) - self._handler = handler - self._auth_strategy = auth_strategy - self._loop = loop - self._secret_key = secret_key - self._protos = {} - - def __call__(self): - proto_id = uuidutils.generate_uuid() - proto = self.protocol(self._handler, proto_id, self._auth_strategy, - self._loop) - self._protos[proto_id] = proto - proto.factory = self - return proto - - def unregister(self, proto_id): - self._protos.pop(proto_id) - - -class NotificationFactory(object): - - protocol = protocol.NotificationProtocol - - def __init__(self, factory): - self.message_factory = factory - - def set_subscription_url(self, url): - self._subscription_url = url - - def get_subscriber(self, protocol): - return '%s%s' % (self._subscription_url, protocol.proto_id) - - def send_data(self, data, proto_id): - instance = self.message_factory._protos.get(proto_id) - if instance: - # NOTE(Eva-i): incoming data is encoded in JSON, let's convert it - # to MsgPack, if notification should be encoded in binary format. 
- if instance.notify_in_binary: - data = msgpack.packb(json.loads(data)) - instance.sendMessage(data, instance.notify_in_binary) - - def __call__(self): - return self.protocol(self) diff --git a/zaqar/transport/websocket/protocol.py b/zaqar/transport/websocket/protocol.py deleted file mode 100644 index 066deea0..00000000 --- a/zaqar/transport/websocket/protocol.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import io -import json -import sys - -from autobahn.asyncio import websocket -import msgpack -from oslo_log import log as logging -from oslo_utils import timeutils -import pytz -import txaio - -try: - import asyncio -except ImportError: - import trollius as asyncio - -try: - import mimetools - Message = mimetools.Message -except ImportError: - from email.mime import message - Message = message.MIMEMessage - -from zaqar.common import consts - - -LOG = logging.getLogger(__name__) - - -class MessagingProtocol(websocket.WebSocketServerProtocol): - - _fake_env = { - 'REQUEST_METHOD': 'POST', - 'SERVER_NAME': 'zaqar', - 'SERVER_PORT': 80, - 'SERVER_PROTOCOL': 'HTTP/1.1', - 'PATH_INFO': '/', - 'SCRIPT_NAME': '', - 'wsgi.url_scheme': 'http' - } - - def __init__(self, handler, proto_id, auth_strategy, loop): - txaio.use_asyncio() - websocket.WebSocketServerProtocol.__init__(self) - self._handler = handler - self.proto_id = proto_id - self._auth_strategy = auth_strategy - self._loop = loop - self._authentified = False - self._auth_env = None - self._auth_app = None - self._auth_in_binary = None - self._deauth_handle = None - self.notify_in_binary = None - self._subscriptions = [] - - def onConnect(self, request): - LOG.info("Client connecting: %s", request.peer) - - def onOpen(self): - LOG.info("WebSocket connection open.") - - def onMessage(self, payload, isBinary): - # Deserialize the request - try: - if isBinary: - payload = msgpack.unpackb(payload, encoding='utf-8') - else: - payload = json.loads(payload) - except Exception: - if isBinary: - pack_name = 'binary (MessagePack)' - else: - pack_name = 'text (JSON)' - ex_type, ex_value = sys.exc_info()[:2] - ex_name = ex_type.__name__ - msg = 'Can\'t decode {0} request. {1}: {2}'.format( - pack_name, ex_name, ex_value) - LOG.debug(msg) - body = {'error': msg} - resp = self._handler.create_response(400, body) - return self._send_response(resp, isBinary) - # Check if the request is dict - if not isinstance(payload, dict): - body = { - 'error': 'Unexpected body type. Expected dict or dict like.' 
-            }
-            resp = self._handler.create_response(400, body)
-            return self._send_response(resp, isBinary)
-        # Parse the request
-        req = self._handler.create_request(payload, self._auth_env)
-        # Validate and process the request
-        resp = self._handler.validate_request(payload, req)
-        if resp is None:
-            if self._auth_strategy and not self._authentified:
-                if self._auth_app or payload.get('action') != 'authenticate':
-                    if 'URL-Signature' in payload.get('headers', {}):
-                        if self._handler.verify_signature(
-                                self.factory._secret_key, payload):
-                            resp = self._handler.process_request(req, self)
-                        else:
-                            body = {'error': 'Not authentified.'}
-                            resp = self._handler.create_response(
-                                403, body, req)
-                    else:
-                        body = {'error': 'Not authentified.'}
-                        resp = self._handler.create_response(403, body, req)
-                else:
-                    return self._authenticate(payload, isBinary)
-            elif payload.get('action') == 'authenticate':
-                return self._authenticate(payload, isBinary)
-            else:
-                resp = self._handler.process_request(req, self)
-        if payload.get('action') == consts.SUBSCRIPTION_CREATE:
-            # NOTE(Eva-i): this makes further websocket notifications use
-            # the same encoding as the last successful websocket
-            # subscription create request.
-            if resp._headers['status'] == 201:
-                subscriber = payload['body'].get('subscriber')
-                # If there is no subscriber, the user has created a websocket
-                # subscription.
-                if not subscriber:
-                    self.notify_in_binary = isBinary
-                    self._subscriptions.append(resp)
-        return self._send_response(resp, isBinary)
-
-    def onClose(self, wasClean, code, reason):
-        self._handler.clean_subscriptions(self._subscriptions)
-        self.factory.unregister(self.proto_id)
-        LOG.info("WebSocket connection closed: %s", reason)
-
-    def _authenticate(self, payload, in_binary):
-        self._auth_in_binary = in_binary
-        self._auth_app = self._auth_strategy(self._auth_start)
-        env = self._fake_env.copy()
-        env.update(
-            (self._header_to_env_var(key), value)
-            for key, value in payload.get('headers').items())
-        self._auth_app(env, self._auth_response)
-
-    def _auth_start(self, env, start_response):
-        self._authentified = True
-        self._auth_env = dict(
-            (self._env_var_to_header(key), value)
-            for key, value in env.items())
-        self._auth_app = None
-        expire = env['keystone.token_info']['token']['expires_at']
-        expire_time = timeutils.parse_isotime(expire)
-        now = datetime.datetime.now(tz=pytz.UTC)
-        delta = (expire_time - now).total_seconds()
-        if self._deauth_handle is not None:
-            self._deauth_handle.cancel()
-        self._deauth_handle = self._loop.call_later(
-            delta, self._deauthenticate)
-
-        start_response('200 OK', [])
-
-    def _deauthenticate(self):
-        self._authentified = False
-        self._auth_env = None
-        self.sendClose(4003, u'Authentication expired.')
-
-    def _auth_response(self, status, message):
-        code = int(status.split()[0])
-        req = self._handler.create_request({'action': 'authenticate'})
-        if code != 200:
-            # NOTE(wangxiyuan): _auth_app should be cleaned up after the
-            # authentication failure so that the client can be authenticated
-            # again.
- self._auth_app = None - body = {'error': 'Authentication failed.'} - resp = self._handler.create_response(code, body, req) - self._send_response(resp, self._auth_in_binary) - else: - body = {'message': 'Authentified.'} - resp = self._handler.create_response(200, body, req) - self._send_response(resp, self._auth_in_binary) - - def _header_to_env_var(self, key): - return 'HTTP_%s' % key.replace('-', '_').upper() - - def _env_var_to_header(self, key): - if key.startswith("HTTP_"): - return key[5:].replace("_", "-") - else: - return key - - def _send_response(self, resp, in_binary): - if in_binary: - pack_name = 'bin' - self.sendMessage(msgpack.packb(resp.get_response()), True) - else: - pack_name = 'txt' - self.sendMessage(json.dumps(resp.get_response()), False) - if LOG.isEnabledFor(logging.INFO): - api = resp._request._api - status = resp._headers['status'] - action = resp._request._action - # Dump to JSON to print body without unicode prefixes on Python 2 - body = json.dumps(resp._request._body) - var_dict = {'api': api, 'pack_name': pack_name, 'status': - status, 'action': action, 'body': body} - LOG.info('Response: API %(api)s %(pack_name)s, %(status)s. ' - 'Request: action "%(action)s", body %(body)s.', - var_dict) - - -class NotificationProtocol(asyncio.Protocol): - - def __init__(self, factory): - self._factory = factory - - def connection_made(self, transport): - self._transport = transport - self._data = bytearray() - self._state = 'INIT' - self._subscriber_id = None - self._length = 0 - - def write_status(self, status): - self._transport.write(b'HTTP/1.0 %s\r\n\r\n' % status) - self._transport.close() - - def data_received(self, data): - self._data.extend(data) - if self._state == 'INIT' and b'\r\n' in self._data: - first_line, self._data = self._data.split(b'\r\n', 1) - verb, uri, version = first_line.split() - if verb != b'POST': - self.write_status(b'405 Not Allowed') - return - self._state = 'HEADERS' - self._subscriber_id = uri[1:] - - if self._state == 'HEADERS' and b'\r\n\r\n' in self._data: - headers, self._data = self._data.split(b'\r\n\r\n', 1) - headers = Message(io.BytesIO(headers)) - length = headers.get(b'content-length') - if not length: - self.write_status(b'400 Bad Request') - return - self._length = int(length) - self._state = 'BODY' - - if self._state == 'BODY': - if len(self._data) >= self._length: - if self._subscriber_id: - self._factory.send_data(bytes(self._data), - str(self._subscriber_id)) - self.write_status(b'200 OK') - else: - self.write_status(b'400 Bad Request') - - def connection_lost(self, exc): - self._data = self._subscriber_id = None - self._length = 0 diff --git a/zaqar/transport/wsgi/__init__.py b/zaqar/transport/wsgi/__init__.py deleted file mode 100644 index dacc5927..00000000 --- a/zaqar/transport/wsgi/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
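The NotificationProtocol shown above implements just enough HTTP to receive
subscriber callbacks: a POST to /{subscriber_id} whose headers include
Content-Length. A minimal sketch of a conforming request using the Python 3
stdlib (the host, port, and subscriber id are illustrative, not taken from
the original code)::

    import http.client

    subscriber_id = '883b0f7a-8a8f-4e02-a66e-e06abbe37ea6'  # illustrative
    body = '{"body": {"event": "BackupStarted"}}'

    conn = http.client.HTTPConnection('127.0.0.1', 9001)
    conn.request('POST', '/' + subscriber_id, body,
                 {'Content-Length': str(len(body))})
    print(conn.getresponse().status)  # 200 on success; non-POST verbs get 405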
- -"""WSGI Transport Driver""" - -from zaqar.transport.wsgi import driver - -# Hoist into package namespace -Driver = driver.Driver diff --git a/zaqar/transport/wsgi/app.py b/zaqar/transport/wsgi/app.py deleted file mode 100644 index ff490911..00000000 --- a/zaqar/transport/wsgi/app.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""WSGI App for WSGI Containers - -This app should be used by external WSGI -containers. For example: - - $ gunicorn zaqar.transport.wsgi.app:app - -NOTE: As for external containers, it is necessary -to put config files in the standard paths. There's -no common way to specify / pass configuration files -to the WSGI app when it is called from other apps. -""" - -from oslo_config import cfg -from oslo_log import log -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -from zaqar import bootstrap -from zaqar import version - -# Use the global CONF instance -conf = cfg.CONF -gmr_opts.set_defaults(conf) -log.register_options(conf) -conf(project='zaqar', prog='zaqar-queues', args=[]) -log.setup(conf, 'zaqar') - -gmr.TextGuruMeditation.setup_autorun(version, conf=conf) - -boot = bootstrap.Bootstrap(conf) -conf.drivers.transport = 'wsgi' -application = boot.transport.app -# Keep the old name for compatibility -app = application diff --git a/zaqar/transport/wsgi/driver.py b/zaqar/transport/wsgi/driver.py deleted file mode 100644 index 4541a841..00000000 --- a/zaqar/transport/wsgi/driver.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-from distutils import version as d_version
-import falcon
-import six
-import socket
-from wsgiref import simple_server
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import netutils
-
-from zaqar.common import decorators
-from zaqar.common.transport.wsgi import helpers
-from zaqar.i18n import _
-from zaqar import transport
-from zaqar.transport import acl
-from zaqar.transport.middleware import auth
-from zaqar.transport.middleware import cors
-from zaqar.transport.middleware import profile
-from zaqar.transport import validation
-from zaqar.transport.wsgi import v1_0
-from zaqar.transport.wsgi import v1_1
-from zaqar.transport.wsgi import v2_0
-from zaqar.transport.wsgi import version
-
-_WSGI_OPTIONS = (
-    cfg.HostAddressOpt('bind', default='127.0.0.1',
-                       help='Address on which the self-hosting server will '
-                            'listen.'),
-
-    cfg.PortOpt('port', default=8888,
-                help='Port on which the self-hosting server will listen.'),
-)
-
-_WSGI_GROUP = 'drivers:transport:wsgi'
-
-LOG = logging.getLogger(__name__)
-
-
-def _config_options():
-    return [(_WSGI_GROUP, _WSGI_OPTIONS)]
-
-
-class FuncMiddleware(object):
-
-    def __init__(self, func):
-        self.func = func
-
-    def process_resource(self, req, resp, resource, params):
-        return self.func(req, resp, params)
-
-
-class Driver(transport.DriverBase):
-
-    def __init__(self, conf, storage, cache, control):
-        super(Driver, self).__init__(conf, storage, cache, control)
-
-        self._conf.register_opts(_WSGI_OPTIONS, group=_WSGI_GROUP)
-        self._wsgi_conf = self._conf[_WSGI_GROUP]
-        self._validate = validation.Validator(self._conf)
-
-        self.app = None
-        self._init_routes()
-        self._init_middleware()
-
-    def _verify_pre_signed_url(self, req, resp, params):
-        return helpers.verify_pre_signed_url(self._conf.signed_url.secret_key,
-                                             req, resp, params)
-
-    def _validate_queue_identification(self, req, resp, params):
-        return helpers.validate_queue_identification(
-            self._validate.queue_identification, req, resp, params)
-
-    @decorators.lazy_property(write=False)
-    def before_hooks(self):
-        """Exposed to facilitate unit testing."""
-        return [
-            self._verify_pre_signed_url,
-            helpers.require_content_type_be_non_urlencoded,
-            helpers.require_accepts_json,
-            helpers.require_client_id,
-            helpers.extract_project_id,
-
-            # NOTE(jeffrey4l): Depends on the project_id and client_id being
-            # extracted above
-            helpers.inject_context,
-
-            # NOTE(kgriffs): Depends on project_id being extracted, above
-            self._validate_queue_identification
-        ]
-
-    def _init_routes(self):
-        """Initialize hooks and URI routes to resources."""
-
-        catalog = [
-            ('/v1', v1_0.public_endpoints(self, self._conf)),
-            ('/v1.1', v1_1.public_endpoints(self, self._conf)),
-            ('/v2', v2_0.public_endpoints(self, self._conf)),
-            ('/', [('', version.Resource())])
-        ]
-
-        if self._conf.admin_mode:
-            catalog.extend([
-                ('/v1', v1_0.private_endpoints(self, self._conf)),
-                ('/v1.1', v1_1.private_endpoints(self, self._conf)),
-                ('/v2', v2_0.private_endpoints(self, self._conf)),
-            ])
-
-        # NOTE(wanghao): The hook feature was removed in falcon 1.0.0, so
-        # middleware is used instead; both paths are kept for compatibility
-        # with older falcon releases. The hook path can be removed once
-        # requirements pin falcon to 1.0.0 or later.
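-        # Each before-hook has the signature (req, resp, params), so e.g.
-        # FuncMiddleware(helpers.require_client_id) produces a middleware
-        # whose process_resource() runs that hook for every routed request.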
-        if (d_version.LooseVersion(falcon.__version__) >=
-                d_version.LooseVersion("1.0.0")):
-            middleware = [FuncMiddleware(hook) for hook in self.before_hooks]
-            self.app = falcon.API(middleware=middleware)
-        else:
-            self.app = falcon.API(before=self.before_hooks)
-
-        self.app.add_error_handler(Exception, self._error_handler)
-
-        for version_path, endpoints in catalog:
-            if endpoints:
-                for route, resource in endpoints:
-                    self.app.add_route(version_path + route, resource)
-
-    def _init_middleware(self):
-        """Initialize WSGI middleware."""
-
-        # NOTE(zhiyan): Install Profiler
-        if (self._conf.profiler.enabled and
-                self._conf.profiler.trace_wsgi_transport):
-            self.app = profile.install_wsgi_tracer(self.app, self._conf)
-
-        auth_app = self.app
-        # NOTE(flaper87): Install Auth
-        if self._conf.auth_strategy:
-            strategy = auth.strategy(self._conf.auth_strategy)
-            auth_app = strategy.install(self.app, self._conf)
-
-        self.app = auth.SignedHeadersAuth(self.app, auth_app)
-
-        # NOTE(wangxiyuan): Install CORS, this middleware should be called
-        # before Keystone auth.
-        self.app = cors.install_cors(self.app, auth_app, self._conf)
-
-        acl.setup_policy(self._conf)
-
-    def _error_handler(self, exc, request, response, params):
-        if isinstance(exc, falcon.HTTPError):
-            raise exc
-        LOG.exception(exc)
-        raise falcon.HTTPInternalServerError('Internal server error',
-                                             six.text_type(exc))
-
-    def _get_server_cls(self, host):
-        """Return an appropriate WSGI server class based on the provided host.
-
-        :param host: The listen host for the zaqar API server.
-        """
-        server_cls = simple_server.WSGIServer
-        if netutils.is_valid_ipv6(host):
-            if getattr(server_cls, 'address_family') == socket.AF_INET:
-                class server_cls(server_cls):
-                    address_family = socket.AF_INET6
-        return server_cls
-
-    def listen(self):
-        """Self-host using 'bind' and 'port' from the WSGI config group."""
-
-        msgtmpl = _(u'Serving on host %(bind)s:%(port)s')
-        LOG.info(msgtmpl,
-                 {'bind': self._wsgi_conf.bind, 'port': self._wsgi_conf.port})
-        server_cls = self._get_server_cls(self._wsgi_conf.bind)
-        httpd = simple_server.make_server(self._wsgi_conf.bind,
-                                          self._wsgi_conf.port,
-                                          self.app,
-                                          server_cls)
-        httpd.serve_forever()
diff --git a/zaqar/transport/wsgi/errors.py b/zaqar/transport/wsgi/errors.py
deleted file mode 100644
index 89877455..00000000
--- a/zaqar/transport/wsgi/errors.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
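The _get_server_cls() helper above works around wsgiref defaulting to
AF_INET by swapping in an AF_INET6 subclass when the bind address is IPv6.
The same technique in a self-contained sketch (the trivial app is
illustrative)::

    import socket
    from wsgiref import simple_server


    class WSGIServerIPv6(simple_server.WSGIServer):
        address_family = socket.AF_INET6


    def app(environ, start_response):
        start_response('204 No Content', [])
        return []

    httpd = simple_server.make_server('::1', 8888, app, WSGIServerIPv6)
    httpd.serve_forever()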
-
-import falcon
-
-from zaqar.i18n import _
-
-
-class HTTPServiceUnavailable(falcon.HTTPServiceUnavailable):
-    """Wraps falcon.HTTPServiceUnavailable with Zaqar messaging."""
-
-    TITLE = _(u'Service temporarily unavailable')
-    DESCRIPTION = _(u'Please try again in a few seconds.')
-
-    def __init__(self, description, retry_after=30):
-        description = description + ' ' + self.DESCRIPTION
-        super(HTTPServiceUnavailable, self).__init__(
-            self.TITLE, description, retry_after)
-
-
-class HTTPBadRequestAPI(falcon.HTTPBadRequest):
-    """Wraps falcon.HTTPBadRequest with a contextual title."""
-
-    TITLE = _(u'Invalid API request')
-
-    def __init__(self, description):
-        super(HTTPBadRequestAPI, self).__init__(self.TITLE, description)
-
-
-class HTTPBadRequestBody(falcon.HTTPBadRequest):
-    """Wraps falcon.HTTPBadRequest with a contextual title."""
-
-    TITLE = _(u'Invalid request body')
-
-    def __init__(self, description):
-        super(HTTPBadRequestBody, self).__init__(self.TITLE, description)
-
-
-class HTTPDocumentTypeNotSupported(HTTPBadRequestBody):
-    """Wraps HTTPBadRequestBody with a standard description."""
-
-    DESCRIPTION = _(u'Document type not supported.')
-
-    def __init__(self):
-        super(HTTPDocumentTypeNotSupported, self).__init__(self.DESCRIPTION)
-
-
-class HTTPForbidden(falcon.HTTPForbidden):
-    """Wraps falcon.HTTPForbidden with a contextual title."""
-
-    TITLE = _(u'Not authorized')
-    DESCRIPTION = _(u'You are not authorized to complete this action.')
-
-    def __init__(self):
-        super(HTTPForbidden, self).__init__(self.TITLE, self.DESCRIPTION)
-
-
-class HTTPConflict(falcon.HTTPConflict):
-    """Wraps falcon.HTTPConflict with a contextual title."""
-
-    TITLE = _(u'Resource conflict')
-
-    def __init__(self, description, **kwargs):
-        super(HTTPConflict, self).__init__(self.TITLE, description, **kwargs)
-
-
-class HTTPNotFound(falcon.HTTPNotFound):
-    """Wraps falcon.HTTPNotFound with a contextual title."""
-
-    TITLE = _(u'Not found')
-
-    def __init__(self, description):
-        super(HTTPNotFound, self).__init__(title=self.TITLE,
-                                           description=description)
-
-
-class HTTPUnsupportedMediaType(falcon.HTTPUnsupportedMediaType):
-    """Wraps falcon.HTTPUnsupportedMediaType with a contextual title."""
-
-    def __init__(self, description):
-        super(HTTPUnsupportedMediaType, self).__init__(description)
diff --git a/zaqar/transport/wsgi/utils.py b/zaqar/transport/wsgi/utils.py
deleted file mode 100644
index d682678c..00000000
--- a/zaqar/transport/wsgi/utils.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
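These wrappers let responders raise transport errors with consistent,
translated titles; HTTPServiceUnavailable also appends its standard
description and a Retry-After of 30 seconds. A sketch of typical usage
inside a resource class (the controller call is illustrative)::

    from zaqar.storage import errors as storage_errors
    from zaqar.transport.wsgi import errors as wsgi_errors

    def on_get(self, req, resp, project_id, queue_name):
        try:
            stats = self._queue_ctrl.stats(queue_name, project=project_id)
        except storage_errors.DoesNotExist as ex:
            raise wsgi_errors.HTTPNotFound(str(ex))
        except Exception:
            # Client sees: 'Queue stats could not be read. Please try
            # again in a few seconds.' plus a Retry-After: 30 header.
            raise wsgi_errors.HTTPServiceUnavailable(
                'Queue stats could not be read.')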
- -import falcon -import jsonschema -from oslo_log import log as logging - -from zaqar.i18n import _ -from zaqar.transport import utils -from zaqar.transport.wsgi import errors - -JSONObject = dict -"""Represents a JSON object in Python.""" - -JSONArray = list -"""Represents a JSON array in Python.""" - -LOG = logging.getLogger(__name__) - - -# -# TODO(kgriffs): Create Falcon "before" hooks adapters for these functions -# - - -def deserialize(stream, len): - """Deserializes JSON from a file-like stream. - - This function deserializes JSON from a stream, including - translating read and parsing errors to HTTP error types. - - :param stream: file-like object from which to read an object or - array of objects. - :param len: number of bytes to read from stream - :raises HTTPBadRequest: if the request is invalid - :raises HTTPServiceUnavailable: if the http service is unavailable - """ - - if len is None: - description = _(u'Request body can not be empty') - raise errors.HTTPBadRequestBody(description) - - try: - # TODO(kgriffs): read_json should stream the resulting list - # of messages, returning a generator rather than buffering - # everything in memory (bp/streaming-serialization). - return utils.read_json(stream, len) - - except utils.MalformedJSON as ex: - LOG.debug(ex) - description = _(u'Request body could not be parsed.') - raise errors.HTTPBadRequestBody(description) - - except utils.OverflowedJSONInteger as ex: - LOG.debug(ex) - description = _(u'JSON contains integer that is too large.') - raise errors.HTTPBadRequestBody(description) - - except Exception as ex: - # Error while reading from the network/server - LOG.exception(ex) - description = _(u'Request body could not be read.') - raise errors.HTTPServiceUnavailable(description) - - -def sanitize(document, spec=None, doctype=JSONObject): - """Validates a document and drops undesired fields. - - :param document: A dict to verify according to `spec`. - :param spec: (Default None) Iterable describing expected fields, - yielding tuples with the form of: - - (field_name, value_type, default_value) - - Note that value_type may either be a Python type, or the - special string '*' to accept any type. default_value is the - default to give the field if it is missing, or None to require - that the field be present. - - If spec is None, the incoming documents will not be validated. - :param doctype: type of document to expect; must be either - JSONObject or JSONArray. - :raises HTTPBadRequestBody: if the request is invalid - :returns: A sanitized, filtered version of the document. If the - document is a list of objects, each object will be filtered - and returned in a new list. If, on the other hand, the document - is expected to contain a single object, that object's fields will - be filtered and the resulting object will be returned. - """ - - if doctype is JSONObject: - if not isinstance(document, JSONObject): - raise errors.HTTPDocumentTypeNotSupported() - - return document if spec is None else filter(document, spec) - - if doctype is JSONArray: - if not isinstance(document, JSONArray): - raise errors.HTTPDocumentTypeNotSupported() - - if spec is None: - return document - - return [filter(obj, spec) for obj in document] - - raise TypeError('doctype must be either a JSONObject or JSONArray') - - -def filter(document, spec): - """Validates and retrieves typed fields from a single document. - - Sanitizes a dict-like document by checking it against a - list of field spec, and returning only those fields - specified. 
- - :param document: dict-like object - :param spec: iterable describing expected fields, yielding - tuples with the form of: (field_name, value_type). Note that - value_type may either be a Python type, or the special - string '*' to accept any type. - :raises HTTPBadRequest: if any field is missing or not an - instance of the specified type - :returns: A filtered dict containing only the fields - listed in the spec - """ - - filtered = {} - for name, value_type, default_value in spec: - filtered[name] = get_checked_field(document, name, - value_type, default_value) - - return filtered - - -def get_checked_field(document, name, value_type, default_value): - """Validates and retrieves a typed field from a document. - - This function attempts to look up doc[name], and raises - appropriate HTTP errors if the field is missing or not an - instance of the given type. - - :param document: dict-like object - :param name: field name - :param value_type: expected value type, or '*' to accept any type - :param default_value: Default value to use if the value is missing, - or None to make the value required. - :raises HTTPBadRequest: if the field is missing or not an - instance of value_type - :returns: value obtained from doc[name] - """ - - try: - value = document[name] - except KeyError: - if default_value is not None: - value = default_value - else: - description = _(u'Missing "{name}" field.').format(name=name) - raise errors.HTTPBadRequestBody(description) - - # PERF(kgriffs): We do our own little spec thing because it is way - # faster than jsonschema. - if value_type == '*' or isinstance(value, value_type): - return value - - description = _(u'The value of the "{name}" field must be a {vtype}.') - description = description.format(name=name, vtype=value_type.__name__) - raise errors.HTTPBadRequestBody(description) - - -def load(req): - """Reads request body, raising an exception if it is not JSON. - - :param req: The request object to read from - :type req: falcon.Request - :return: a dictionary decoded from the JSON stream - :rtype: dict - :raises HTTPBadRequestBody: if JSON could not be parsed - """ - try: - return utils.read_json(req.stream, req.content_length) - except (utils.MalformedJSON, utils.OverflowedJSONInteger) as ex: - LOG.exception(ex) - raise errors.HTTPBadRequestBody( - 'JSON could not be parsed.' - ) - - -# TODO(cpp-cabrera): generalize this -def validate(validator, document): - """Verifies a document against a schema. 
- - :param validator: a validator to use to check validity - :type validator: jsonschema.Draft4Validator - :param document: document to check - :type document: dict - :raises HTTPBadRequestBody: if the request is invalid - """ - try: - validator.validate(document) - except jsonschema.ValidationError as ex: - raise errors.HTTPBadRequestBody( - '{0}: {1}'.format(ex.args, ex.message) - ) - - -def message_url(message, base_path, claim_id=None): - path = "/".join([base_path, 'messages', message['id']]) - if claim_id: - path += falcon.to_query_str({'claim_id': claim_id}) - return path - - -def format_message_v1(message, base_path, claim_id=None): - return { - 'href': message_url(message, base_path, claim_id), - 'ttl': message['ttl'], - 'age': message['age'], - 'body': message['body'], - } - - -def format_message_v1_1(message, base_path, claim_id=None): - url = message_url(message, base_path, claim_id) - return { - 'id': message['id'], - 'href': url, - 'ttl': message['ttl'], - 'age': message['age'], - 'body': message['body'], - } diff --git a/zaqar/transport/wsgi/v1_0/__init__.py b/zaqar/transport/wsgi/v1_0/__init__.py deleted file mode 100644 index 1176975b..00000000 --- a/zaqar/transport/wsgi/v1_0/__init__.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
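The sanitize()/filter() helpers above validate deserialized documents
against a spec of (field_name, value_type, default_value) tuples, where '*'
accepts any type and a None default makes the field required. For example
(the document values are illustrative)::

    spec = (('ttl', int, None), ('body', '*', None))

    sanitize({'ttl': 300, 'body': 'hi', 'junk': 1}, spec)
    # => {'ttl': 300, 'body': 'hi'}   (the unknown 'junk' field is dropped)

    sanitize({'body': 'hi'}, spec)
    # => raises HTTPBadRequestBody: Missing "ttl" field.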
- -from oslo_log import log as logging - -from zaqar.common import decorators -from zaqar.transport.wsgi.v1_0 import claims -from zaqar.transport.wsgi.v1_0 import health -from zaqar.transport.wsgi.v1_0 import homedoc -from zaqar.transport.wsgi.v1_0 import messages -from zaqar.transport.wsgi.v1_0 import metadata -from zaqar.transport.wsgi.v1_0 import pools -from zaqar.transport.wsgi.v1_0 import queues -from zaqar.transport.wsgi.v1_0 import stats - - -LOG = logging.getLogger(__name__) - - -VERSION = { - 'id': '1', - 'status': 'DEPRECATED', - 'updated': '2014-9-11T17:47:05Z', - 'media-types': [ - { - 'base': 'application/json', - 'type': 'application/vnd.openstack.messaging-v1+json' - } - ], - 'links': [ - { - 'href': '/v1/', - 'rel': 'self' - } - ] -} - - -@decorators.api_version_manager(VERSION) -def public_endpoints(driver, conf): - queue_controller = driver._storage.queue_controller - message_controller = driver._storage.message_controller - claim_controller = driver._storage.claim_controller - - return [ - # Home - ('/', - homedoc.Resource()), - - # Queues Endpoints - ('/queues', - queues.CollectionResource(driver._validate, - queue_controller)), - ('/queues/{queue_name}', - queues.ItemResource(queue_controller, - message_controller)), - ('/queues/{queue_name}/stats', - stats.Resource(queue_controller)), - ('/queues/{queue_name}/metadata', - metadata.Resource(driver._wsgi_conf, driver._validate, - queue_controller)), - - # Messages Endpoints - ('/queues/{queue_name}/messages', - messages.CollectionResource(driver._wsgi_conf, - driver._validate, - message_controller)), - ('/queues/{queue_name}/messages/{message_id}', - messages.ItemResource(message_controller)), - - # Claims Endpoints - ('/queues/{queue_name}/claims', - claims.CollectionResource(driver._wsgi_conf, - driver._validate, - claim_controller)), - ('/queues/{queue_name}/claims/{claim_id}', - claims.ItemResource(driver._wsgi_conf, - driver._validate, - claim_controller)), - - # Health - ('/health', - health.Resource(driver._storage)) - ] - - -@decorators.api_version_manager(VERSION) -def private_endpoints(driver, conf): - if not conf.pooling: - return [] - - pools_controller = driver._control.pools_controller - - return [ - ('/pools', - pools.Listing(pools_controller)), - ('/pools/{pool}', - pools.Resource(pools_controller)), - ] diff --git a/zaqar/transport/wsgi/v1_0/claims.py b/zaqar/transport/wsgi/v1_0/claims.py deleted file mode 100644 index 8df308c2..00000000 --- a/zaqar/transport/wsgi/v1_0/claims.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
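The public_endpoints()/private_endpoints() functions above only build
(route, resource) catalogs; it is the WSGI driver's _init_routes() that
prefixes and registers them, roughly::

    for version_path, endpoints in catalog:
        for route, resource in endpoints:
            app.add_route(version_path + route, resource)

so, for example, the stats resource ends up being served at
/v1/queues/{queue_name}/stats.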
- -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - -CLAIM_POST_SPEC = (('ttl', int, None), ('grace', int, None)) -CLAIM_PATCH_SPEC = (('ttl', int, None), ('grace', int, 0)) - - -class Resource(object): - - __slots__ = ('_claim_controller', '_validate') - - def __init__(self, wsgi_conf, validate, claim_controller): - self._claim_controller = claim_controller - self._validate = validate - - -class CollectionResource(Resource): - - @decorators.TransportLog("Claims collection") - def on_post(self, req, resp, project_id, queue_name): - # Check for an explicit limit on the # of messages to claim - limit = req.get_param_as_int('limit') - claim_options = {} if limit is None else {'limit': limit} - - # Read claim metadata (e.g., TTL) and raise appropriate - # HTTP errors as needed. - document = wsgi_utils.deserialize(req.stream, req.content_length) - metadata = wsgi_utils.sanitize(document, CLAIM_POST_SPEC) - - # Claim some messages - try: - self._validate.claim_creation(metadata, limit=limit) - cid, msgs = self._claim_controller.create( - queue_name, - metadata=metadata, - project=project_id, - **claim_options) - - # Buffer claimed messages - # TODO(kgriffs): optimize, along with serialization (below) - resp_msgs = list(msgs) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be created.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Serialize claimed messages, if any. This logic assumes - # the storage driver returned well-formed messages. 
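-        # A non-empty claim is answered with 201 and a Location header
-        # such as /v1/queues/{queue_name}/claims/{claim_id}; claiming from
-        # an empty queue falls through to a bodyless 204 below.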
- if len(resp_msgs) != 0: - resp_msgs = [wsgi_utils.format_message_v1( - msg, req.path.rpartition('/')[0], cid) for msg in resp_msgs] - - resp.location = req.path + '/' + cid - resp.body = utils.to_json(resp_msgs) - resp.status = falcon.HTTP_201 - else: - resp.status = falcon.HTTP_204 - - -class ItemResource(Resource): - - __slots__ = ('_claim_controller', '_validate') - - def __init__(self, wsgi_conf, validate, claim_controller): - self._claim_controller = claim_controller - self._validate = validate - - @decorators.TransportLog("Claim item") - def on_get(self, req, resp, project_id, queue_name, claim_id): - try: - meta, msgs = self._claim_controller.get( - queue_name, - claim_id=claim_id, - project=project_id) - - # Buffer claimed messages - # TODO(kgriffs): Optimize along with serialization (see below) - meta['messages'] = list(msgs) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be queried.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Serialize claimed messages - # TODO(kgriffs): Optimize - meta['messages'] = [wsgi_utils.format_message_v1( - msg, req.path.rsplit('/', 2)[0], meta['id']) - for msg in meta['messages']] - - meta['href'] = req.path - del meta['id'] - - resp.content_location = req.relative_uri - resp.body = utils.to_json(meta) - # status defaults to 200 - - @decorators.TransportLog("Claim item") - def on_patch(self, req, resp, project_id, queue_name, claim_id): - # Read claim metadata (e.g., TTL) and raise appropriate - # HTTP errors as needed. - document = wsgi_utils.deserialize(req.stream, req.content_length) - metadata = wsgi_utils.sanitize(document, CLAIM_PATCH_SPEC) - - try: - self._validate.claim_updating(metadata) - self._claim_controller.update(queue_name, - claim_id=claim_id, - metadata=metadata, - project=project_id) - - resp.status = falcon.HTTP_204 - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be updated.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - @decorators.TransportLog("Claim item") - def on_delete(self, req, resp, project_id, queue_name, claim_id): - try: - self._claim_controller.delete(queue_name, - claim_id=claim_id, - project=project_id) - - resp.status = falcon.HTTP_204 - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) diff --git a/zaqar/transport/wsgi/v1_0/health.py b/zaqar/transport/wsgi/v1_0/health.py deleted file mode 100644 index 398aba35..00000000 --- a/zaqar/transport/wsgi/v1_0/health.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations under -# the License. - -import falcon - - -class Resource(object): - - __slots__ = ('_driver',) - - def __init__(self, driver): - self._driver = driver - - def on_get(self, req, resp, **kwargs): - resp.status = (falcon.HTTP_204 if self._driver.is_alive() - else falcon.HTTP_503) - - def on_head(self, req, resp, **kwargs): - resp.status = falcon.HTTP_204 diff --git a/zaqar/transport/wsgi/v1_0/homedoc.py b/zaqar/transport/wsgi/v1_0/homedoc.py deleted file mode 100644 index 01387160..00000000 --- a/zaqar/transport/wsgi/v1_0/homedoc.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import json - - -# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03 -JSON_HOME = { - 'resources': { - # ----------------------------------------------------------------- - # Queues - # ----------------------------------------------------------------- - 'rel/queues': { - 'href-template': '/v1/queues{?marker,limit,detailed}', - 'href-vars': { - 'marker': 'param/marker', - 'limit': 'param/queue_limit', - 'detailed': 'param/detailed', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue': { - 'href-template': '/v1/queues/{queue_name}', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['GET', 'HEAD', 'PUT', 'DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue-metadata': { - 'href-template': '/v1/queues/{queue_name}/metadata', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['GET', 'PUT'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue-stats': { - 'href-template': '/v1/queues/{queue_name}/stats', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - - # ----------------------------------------------------------------- - # Messages - # ----------------------------------------------------------------- - 'rel/messages': { - 'href-template': ('/v1/queues/{queue_name}/messages' - '{?marker,limit,echo,include_claimed}'), - 'href-vars': { - 'queue_name': 'param/queue_name', - 'marker': 'param/marker', - 'limit': 'param/messages_limit', - 'echo': 'param/echo', - 'include_claimed': 'param/include_claimed', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/post-messages': { - 'href-template': '/v1/queues/{queue_name}/messages', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'], - }, - }, - - # ----------------------------------------------------------------- - # Claims - # ----------------------------------------------------------------- - 'rel/claim': { - 'href-template': '/v1/queues/{queue_name}/claims{?limit}', - 
'href-vars': { - 'queue_name': 'param/queue_name', - 'limit': 'param/claim_limit', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'] - }, - }, - - } -} - - -class Resource(object): - - def __init__(self): - document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4) - self.document_utf8 = document.encode('utf-8') - - def on_get(self, req, resp, project_id): - resp.data = self.document_utf8 - - resp.content_type = 'application/json-home' - resp.cache_control = ['max-age=86400'] - # status defaults to 200 diff --git a/zaqar/transport/wsgi/v1_0/messages.py b/zaqar/transport/wsgi/v1_0/messages.py deleted file mode 100644 index d84a15db..00000000 --- a/zaqar/transport/wsgi/v1_0/messages.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.common.transport.wsgi import helpers as wsgi_helpers -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - -MESSAGE_POST_SPEC = (('ttl', int, None), ('body', '*', None)) - - -class CollectionResource(object): - - __slots__ = ('_message_controller', '_wsgi_conf', '_validate') - - def __init__(self, wsgi_conf, validate, message_controller): - self._wsgi_conf = wsgi_conf - self._validate = validate - self._message_controller = message_controller - - # ---------------------------------------------------------------------- - # Helpers - # ---------------------------------------------------------------------- - - def _get_by_id(self, base_path, project_id, queue_name, ids): - """Returns one or more messages from the queue by ID.""" - try: - self._validate.message_listing(limit=len(ids)) - messages = self._message_controller.bulk_get( - queue_name, - message_ids=ids, - project=project_id) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare response - messages = list(messages) - if not messages: - return None - - return [wsgi_utils.format_message_v1(m, base_path) for m in messages] - - def _get(self, req, project_id, queue_name): - client_uuid = wsgi_helpers.get_client_uuid(req) - kwargs = {} - - # NOTE(kgriffs): This syntax ensures that - # we don't clobber default values with None. 
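-        # Each call below stores a key in kwargs only when the query
-        # parameter is present; e.g. GET ...?limit=5 yields
-        # kwargs == {'limit': 5}, leaving the remaining defaults intact.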
- req.get_param('marker', store=kwargs) - req.get_param_as_int('limit', store=kwargs) - req.get_param_as_bool('echo', store=kwargs) - req.get_param_as_bool('include_claimed', store=kwargs) - - try: - self._validate.message_listing(**kwargs) - results = self._message_controller.list( - queue_name, - project=project_id, - client_uuid=client_uuid, - **kwargs) - - # Buffer messages - cursor = next(results) - messages = list(cursor) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be listed.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - if not messages: - return None - - # Found some messages, so prepare the response - kwargs['marker'] = next(results) - base_path = req.path.rsplit('/', 1)[0] - messages = [wsgi_utils.format_message_v1( - m, base_path) for m in messages] - - return { - 'messages': messages, - 'links': [ - { - 'rel': 'next', - 'href': req.path + falcon.to_query_str(kwargs) - } - ] - } - - # ---------------------------------------------------------------------- - # Interface - # ---------------------------------------------------------------------- - - @decorators.TransportLog("Messages collection") - def on_post(self, req, resp, project_id, queue_name): - client_uuid = wsgi_helpers.get_client_uuid(req) - - try: - # Place JSON size restriction before parsing - self._validate.message_length(req.content_length) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - # Deserialize and validate the request body - document = wsgi_utils.deserialize(req.stream, req.content_length) - messages = wsgi_utils.sanitize(document, MESSAGE_POST_SPEC, - doctype=wsgi_utils.JSONArray) - - try: - self._validate.message_posting(messages) - - message_ids = self._message_controller.post( - queue_name, - messages=messages, - project=project_id, - client_uuid=client_uuid) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except storage_errors.MessageConflict as ex: - LOG.exception(ex) - description = _(u'No messages could be enqueued.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be enqueued.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare the response - ids_value = ','.join(message_ids) - resp.location = req.path + '?ids=' + ids_value - - hrefs = [req.path + '/' + id for id in message_ids] - - # NOTE(kgriffs): As of the Icehouse release, drivers are - # no longer allowed to enqueue a subset of the messages - # submitted by the client; it's all or nothing. Therefore, - # 'partial' is now always False in the v1.0 API, and the - # field has been removed in v1.1. 
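-        # For example, posting two messages produces a body such as:
-        # {"resources": ["/v1/queues/fizbit/messages/<id1>",
-        #                "/v1/queues/fizbit/messages/<id2>"],
-        #  "partial": false}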
- body = {'resources': hrefs, 'partial': False} - - resp.body = utils.to_json(body) - resp.status = falcon.HTTP_201 - - @decorators.TransportLog("Messages collection") - def on_get(self, req, resp, project_id, queue_name): - resp.content_location = req.relative_uri - - ids = req.get_param_as_list('ids') - if ids is None: - response = self._get(req, project_id, queue_name) - else: - response = self._get_by_id(req.path.rsplit('/', 1)[0], project_id, - queue_name, ids) - - if response is None: - resp.status = falcon.HTTP_204 - return - - resp.body = utils.to_json(response) - # status defaults to 200 - - @decorators.TransportLog("Messages collection") - def on_delete(self, req, resp, project_id, queue_name): - # NOTE(zyuan): Attempt to delete the whole message collection - # (without an "ids" parameter) is not allowed - ids = req.get_param_as_list('ids', required=True) - - try: - self._validate.message_listing(limit=len(ids)) - self._message_controller.bulk_delete( - queue_name, - message_ids=ids, - project=project_id) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_204 - - -class ItemResource(object): - - __slots__ = '_message_controller' - - def __init__(self, message_controller): - self._message_controller = message_controller - - @decorators.TransportLog("Messages item") - def on_get(self, req, resp, project_id, queue_name, message_id): - try: - message = self._message_controller.get( - queue_name, - message_id, - project=project_id) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.content_location = req.relative_uri - message = wsgi_utils.format_message_v1( - message, req.path.rsplit('/', 2)[0]) - resp.body = utils.to_json(message) - # status defaults to 200 - - @decorators.TransportLog("Messages item") - def on_delete(self, req, resp, project_id, queue_name, message_id): - error_title = _(u'Unable to delete') - - try: - self._message_controller.delete( - queue_name, - message_id=message_id, - project=project_id, - claim=req.get_param('claim_id')) - - except storage_errors.MessageNotClaimed as ex: - LOG.debug(ex) - description = _(u'A claim was specified, but the message ' - u'is not currently claimed.') - raise falcon.HTTPBadRequest(error_title, description) - - except storage_errors.ClaimDoesNotExist as ex: - LOG.debug(ex) - description = _(u'The specified claim does not exist or ' - u'has expired.') - raise falcon.HTTPBadRequest(error_title, description) - - except storage_errors.NotPermitted as ex: - LOG.debug(ex) - description = _(u'This message is claimed; it cannot be ' - u'deleted without a valid claim ID.') - raise falcon.HTTPForbidden(error_title, description) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Alles guete - resp.status = falcon.HTTP_204 diff --git a/zaqar/transport/wsgi/v1_0/metadata.py b/zaqar/transport/wsgi/v1_0/metadata.py deleted file mode 100644 index f044ad89..00000000 --- a/zaqar/transport/wsgi/v1_0/metadata.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2013 
Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - - -LOG = logging.getLogger(__name__) - - -class Resource(object): - __slots__ = ('_wsgi_conf', '_validate', '_queue_ctrl') - - def __init__(self, _wsgi_conf, validate, queue_controller): - self._wsgi_conf = _wsgi_conf - self._validate = validate - self._queue_ctrl = queue_controller - - @decorators.TransportLog("Queue metadata") - def on_get(self, req, resp, project_id, queue_name): - try: - resp_dict = self._queue_ctrl.get_metadata(queue_name, - project=project_id) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue metadata could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.content_location = req.path - resp.body = utils.to_json(resp_dict) - # status defaults to 200 - - @decorators.TransportLog("Queue metadata") - def on_put(self, req, resp, project_id, queue_name): - try: - # Place JSON size restriction before parsing - self._validate.queue_metadata_length(req.content_length) - # Deserialize queue metadata - document = wsgi_utils.deserialize(req.stream, req.content_length) - metadata = wsgi_utils.sanitize(document) - # Restrict setting any reserved queue attributes - for key in metadata: - if key.startswith('_'): - description = _(u'Reserved queue attributes in metadata ' - u'(which names start with "_") can not be ' - u'set in API v1.') - raise validation.ValidationFailed(description) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - try: - self._queue_ctrl.set_metadata(queue_name, - metadata=metadata, - project=project_id) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.QueueDoesNotExist as ex: - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Metadata could not be updated.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_204 - resp.location = req.path diff --git a/zaqar/transport/wsgi/v1_0/pools.py b/zaqar/transport/wsgi/v1_0/pools.py deleted file mode 100644 index dbe39b28..00000000 --- a/zaqar/transport/wsgi/v1_0/pools.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""pools: a resource to handle storage pool management - -A pool is added by an operator by interacting with the -pooling-related endpoints. When specifying a pool, the -following fields are required: - -:: - - { - "name": string, - "weight": integer, - "uri": string::uri - } - -Furthermore, depending on the underlying storage type of pool being -registered, there is an optional field:: - - { - "options": {...} - } -""" - -import falcon -import jsonschema -from oslo_log import log -import six - -from zaqar.common.api.schemas import pools as schema -from zaqar.common import utils as common_utils -from zaqar.storage import errors -from zaqar.storage import utils as storage_utils -from zaqar.transport import utils as transport_utils -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = log.getLogger(__name__) - - -class Listing(object): - """A resource to list registered pools - - :param pools_controller: means to interact with storage - """ - - def __init__(self, pools_controller): - self._ctrl = pools_controller - - def on_get(self, request, response, project_id): - """Returns a pool listing as objects embedded in an object: - - :: - - { - "pools": [ - {"href": "", "weight": 100, "uri": ""}, - ... - ], - "links": [ - {"href": "", "rel": "next"} - ] - } - - :returns: HTTP | 200 - """ - - LOG.debug(u'LIST pools') - - store = {} - request.get_param('marker', store=store) - request.get_param_as_int('limit', store=store) - request.get_param_as_bool('detailed', store=store) - - cursor = self._ctrl.list(**store) - - pools = list(next(cursor)) - - results = {} - - if pools: - store['marker'] = next(cursor) - - for entry in pools: - entry['href'] = request.path + '/' + entry['name'] - - results['links'] = [ - { - 'rel': 'next', - 'href': request.path + falcon.to_query_str(store) - } - ] - results['pools'] = pools - - response.content_location = request.relative_uri - response.body = transport_utils.to_json(results) - response.status = falcon.HTTP_200 - - -class Resource(object): - """A handler for individual pool. 
- - :param pools_controller: means to interact with storage - """ - - def __init__(self, pools_controller): - self._ctrl = pools_controller - validator_type = jsonschema.Draft4Validator - self._validators = { - 'weight': validator_type(schema.patch_weight), - 'uri': validator_type(schema.patch_uri), - 'options': validator_type(schema.patch_options), - 'create': validator_type(schema.create) - } - - def on_get(self, request, response, project_id, pool): - """Returns a JSON object for a single pool entry: - - :: - - {"weight": 100, "uri": "", options: {...}} - - :returns: HTTP | [200, 404] - """ - LOG.debug(u'GET pool - name: %s', pool) - data = None - detailed = request.get_param_as_bool('detailed') or False - - try: - data = self._ctrl.get(pool, detailed) - - except errors.PoolDoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - data['href'] = request.path - - response.body = transport_utils.to_json(data) - response.content_location = request.relative_uri - - def on_put(self, request, response, project_id, pool): - """Registers a new pool. Expects the following input: - - :: - - {"weight": 100, "uri": ""} - - An options object may also be provided. - - :returns: HTTP | [201, 204] - """ - - LOG.debug(u'PUT pool - name: %s', pool) - - conf = self._ctrl.driver.conf - data = wsgi_utils.load(request) - wsgi_utils.validate(self._validators['create'], data) - if not storage_utils.can_connect(data['uri'], conf=conf): - raise wsgi_errors.HTTPBadRequestBody( - 'cannot connect to %s' % data['uri'] - ) - try: - self._ctrl.create(pool, weight=data['weight'], - uri=data['uri'], - options=data.get('options', {})) - response.status = falcon.HTTP_201 - response.location = request.path - except errors.PoolAlreadyExists as e: - LOG.exception(e) - raise wsgi_errors.HTTPConflict(six.text_type(e)) - - def on_delete(self, request, response, project_id, pool): - """Deregisters a pool. - - :returns: HTTP | 204 - """ - - LOG.debug(u'DELETE pool - name: %s', pool) - self._ctrl.delete(pool) - response.status = falcon.HTTP_204 - - def on_patch(self, request, response, project_id, pool): - """Allows one to update a pool's weight, uri, and/or options. - - This method expects the user to submit a JSON object - containing at least one of: 'uri', 'weight', 'options'. If - none are found, the request is flagged as bad. There is also - strict format checking through the use of - jsonschema. Appropriate errors are returned in each case for - badly formatted input. 
- - :returns: HTTP | 200,400 - """ - - LOG.debug(u'PATCH pool - name: %s', pool) - data = wsgi_utils.load(request) - - EXPECT = ('weight', 'uri', 'options') - if not any([(field in data) for field in EXPECT]): - LOG.debug(u'PATCH pool, bad params') - raise wsgi_errors.HTTPBadRequestBody( - 'One of `uri`, `weight`, or `options` needs ' - 'to be specified' - ) - - for field in EXPECT: - wsgi_utils.validate(self._validators[field], data) - - conf = self._ctrl.driver.conf - if 'uri' in data and not storage_utils.can_connect(data['uri'], - conf=conf): - raise wsgi_errors.HTTPBadRequestBody( - 'cannot connect to %s' % data['uri'] - ) - fields = common_utils.fields(data, EXPECT, - pred=lambda v: v is not None) - - try: - self._ctrl.update(pool, **fields) - except errors.PoolDoesNotExist as ex: - LOG.exception(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) diff --git a/zaqar/transport/wsgi/v1_0/queues.py b/zaqar/transport/wsgi/v1_0/queues.py deleted file mode 100644 index e43274c6..00000000 --- a/zaqar/transport/wsgi/v1_0/queues.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors - - -LOG = logging.getLogger(__name__) - - -class ItemResource(object): - - __slots__ = ('_queue_controller', '_message_controller') - - def __init__(self, queue_controller, message_controller): - self._queue_controller = queue_controller - self._message_controller = message_controller - - @decorators.TransportLog("Queue item") - def on_put(self, req, resp, project_id, queue_name): - try: - created = self._queue_controller.create( - queue_name, project=project_id) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue could not be created.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_201 if created else falcon.HTTP_204 - resp.location = req.path - - @decorators.TransportLog("Queue item") - def on_head(self, req, resp, project_id, queue_name): - if self._queue_controller.exists(queue_name, project=project_id): - resp.status = falcon.HTTP_204 - else: - resp.status = falcon.HTTP_404 - - resp.content_location = req.path - - on_get = on_head - - @decorators.TransportLog("Queue item") - def on_delete(self, req, resp, project_id, queue_name): - try: - self._queue_controller.delete(queue_name, project=project_id) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_204 - - -class CollectionResource(object): - - __slots__ = ('_queue_controller', '_validate') - - def __init__(self, validate, queue_controller): - self._queue_controller = queue_controller - self._validate = validate - - def on_get(self, req, 
resp, project_id): - LOG.debug(u'Queue collection GET') - - kwargs = {} - - # NOTE(kgriffs): This syntax ensures that - # we don't clobber default values with None. - req.get_param('marker', store=kwargs) - req.get_param_as_int('limit', store=kwargs) - req.get_param_as_bool('detailed', store=kwargs) - - try: - self._validate.queue_listing(**kwargs) - results = self._queue_controller.list(project=project_id, **kwargs) - - # Buffer list of queues - queues = list(next(results)) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queues could not be listed.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Check for an empty list - if len(queues) == 0: - resp.status = falcon.HTTP_204 - return - - # Got some. Prepare the response. - kwargs['marker'] = next(results) - for each_queue in queues: - each_queue['href'] = req.path + '/' + each_queue['name'] - - response_body = { - 'queues': queues, - 'links': [ - { - 'rel': 'next', - 'href': req.path + falcon.to_query_str(kwargs) - } - ] - } - - resp.content_location = req.relative_uri - resp.body = utils.to_json(response_body) - # status defaults to 200 diff --git a/zaqar/transport/wsgi/v1_0/stats.py b/zaqar/transport/wsgi/v1_0/stats.py deleted file mode 100644 index a296c894..00000000 --- a/zaqar/transport/wsgi/v1_0/stats.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
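For reference, a non-empty v1.0 queue listing produced by the collection resource above takes roughly this shape (queue name and marker illustrative); an empty listing instead short-circuits to 204 with no body:

::

    {
        "queues": [
            {"name": "fizbat", "href": "/v1/queues/fizbat"}
        ],
        "links": [
            {"rel": "next", "href": "/v1/queues?marker=fizbat&limit=10"}
        ]
    }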
- -from oslo_log import log as logging -import six - -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport.wsgi import errors as wsgi_errors - - -LOG = logging.getLogger(__name__) - - -class Resource(object): - - __slots__ = '_queue_ctrl' - - def __init__(self, queue_controller): - self._queue_ctrl = queue_controller - - def on_get(self, req, resp, project_id, queue_name): - try: - resp_dict = self._queue_ctrl.stats(queue_name, - project=project_id) - - message_stats = resp_dict['messages'] - - if message_stats['total'] != 0: - base_path = req.path[:req.path.rindex('/')] + '/messages/' - - newest = message_stats['newest'] - newest['href'] = base_path + newest['id'] - del newest['id'] - - oldest = message_stats['oldest'] - oldest['href'] = base_path + oldest['id'] - del oldest['id'] - - resp.content_location = req.path - resp.body = utils.to_json(resp_dict) - # status defaults to 200 - - except storage_errors.QueueIsEmpty as ex: - resp_dict = { - 'messages': { - 'claimed': 0, - 'free': 0, - 'total': 0 - } - } - resp.body = utils.to_json(resp_dict) - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue stats could not be read.') - raise wsgi_errors.HTTPServiceUnavailable(description) diff --git a/zaqar/transport/wsgi/v1_1/__init__.py b/zaqar/transport/wsgi/v1_1/__init__.py deleted file mode 100644 index bec7c531..00000000 --- a/zaqar/transport/wsgi/v1_1/__init__.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
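For a queue with messages, the stats handler above rewrites each boundary message's id into an href, yielding a document roughly like the following (counts, ids, and any driver-specific fields are illustrative):

::

    {
        "messages": {
            "claimed": 2,
            "free": 7,
            "total": 9,
            "oldest": {"href": "/v1/queues/fizbat/messages/50b68a50d6f5b8c8a7c62b01"},
            "newest": {"href": "/v1/queues/fizbat/messages/50b68a50d6f5b8c8a7c62b02"}
        }
    }

An empty queue instead gets the zeroed claimed/free/total document built in the QueueIsEmpty branch.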
- -from oslo_log import log as logging - -from zaqar.common import decorators -from zaqar.transport.wsgi.v1_1 import claims -from zaqar.transport.wsgi.v1_1 import flavors -from zaqar.transport.wsgi.v1_1 import health -from zaqar.transport.wsgi.v1_1 import homedoc -from zaqar.transport.wsgi.v1_1 import messages -from zaqar.transport.wsgi.v1_1 import ping -from zaqar.transport.wsgi.v1_1 import pools -from zaqar.transport.wsgi.v1_1 import queues -from zaqar.transport.wsgi.v1_1 import stats - - -LOG = logging.getLogger(__name__) - - -VERSION = { - 'id': '1.1', - 'status': 'DEPRECATED', - 'updated': '2016-7-29T02:22:47Z', - 'media-types': [ - { - 'base': 'application/json', - 'type': 'application/vnd.openstack.messaging-v1_1+json' - } - ], - 'links': [ - { - 'href': '/v1.1/', - 'rel': 'self' - } - ] -} - - -@decorators.api_version_manager(VERSION) -def public_endpoints(driver, conf): - queue_controller = driver._storage.queue_controller - message_controller = driver._storage.message_controller - claim_controller = driver._storage.claim_controller - - defaults = driver._defaults - - return [ - # Home - ('/', - homedoc.Resource(conf)), - - # Queues Endpoints - ('/queues', - queues.CollectionResource(driver._validate, - queue_controller)), - ('/queues/{queue_name}', - queues.ItemResource(driver._validate, - queue_controller, - message_controller)), - ('/queues/{queue_name}/stats', - stats.Resource(queue_controller)), - - # Messages Endpoints - ('/queues/{queue_name}/messages', - messages.CollectionResource(driver._wsgi_conf, - driver._validate, - message_controller, - queue_controller, - defaults.message_ttl)), - ('/queues/{queue_name}/messages/{message_id}', - messages.ItemResource(message_controller)), - - # Claims Endpoints - ('/queues/{queue_name}/claims', - claims.CollectionResource(driver._wsgi_conf, - driver._validate, - claim_controller, - defaults.claim_ttl, - defaults.claim_grace)), - ('/queues/{queue_name}/claims/{claim_id}', - claims.ItemResource(driver._wsgi_conf, - driver._validate, - claim_controller, - defaults.claim_ttl, - defaults.claim_grace)), - - # Ping - ('/ping', - ping.Resource(driver._storage)) - ] - - -@decorators.api_version_manager(VERSION) -def private_endpoints(driver, conf): - - catalogue = [ - # Health - ('/health', - health.Resource(driver._storage)), - ] - - if conf.pooling: - pools_controller = driver._control.pools_controller - flavors_controller = driver._control.flavors_controller - - catalogue.extend([ - ('/pools', - pools.Listing(pools_controller)), - ('/pools/{pool}', - pools.Resource(pools_controller)), - ('/flavors', - flavors.Listing(flavors_controller)), - ('/flavors/{flavor}', - flavors.Resource(flavors_controller)), - ]) - - return catalogue diff --git a/zaqar/transport/wsgi/v1_1/claims.py b/zaqar/transport/wsgi/v1_1/claims.py deleted file mode 100644 index aa11225c..00000000 --- a/zaqar/transport/wsgi/v1_1/claims.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
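The two catalogues above are plain lists of (route template, resource) pairs; how they get mounted is up to the transport driver. A minimal sketch of the idea (illustrative only; the real wiring lives in Zaqar's WSGI driver, not shown here):

::

    import falcon

    def mount(app, prefix, catalogue):
        # Register each versioned resource under its URI template.
        for route, resource in catalogue:
            app.add_route(prefix + route, resource)

    # e.g. mount(falcon.API(), '/v1.1', public_endpoints(driver, conf))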
- -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - - -class CollectionResource(object): - __slots__ = ( - '_claim_controller', - '_validate', - '_claim_post_spec', - '_default_meta', - ) - - def __init__(self, wsgi_conf, validate, claim_controller, - default_claim_ttl, default_grace_ttl): - - self._claim_controller = claim_controller - self._validate = validate - - self._claim_post_spec = ( - ('ttl', int, default_claim_ttl), - ('grace', int, default_grace_ttl), - ) - - # NOTE(kgriffs): Create this once up front, rather than creating - # a new dict every time, for the sake of performance. - self._default_meta = { - 'ttl': default_claim_ttl, - 'grace': default_grace_ttl, - } - - @decorators.TransportLog("Claims collection") - def on_post(self, req, resp, project_id, queue_name): - # Check for an explicit limit on the # of messages to claim - limit = req.get_param_as_int('limit') - claim_options = {} if limit is None else {'limit': limit} - - # NOTE(kgriffs): Clients may or may not actually include the - # Content-Length header when the body is empty; the following - # check works for both 0 and None. - if not req.content_length: - # No values given, so use defaults - metadata = self._default_meta - else: - # Read claim metadata (e.g., TTL) and raise appropriate - # HTTP errors as needed. - document = wsgi_utils.deserialize(req.stream, req.content_length) - metadata = wsgi_utils.sanitize(document, self._claim_post_spec) - - # Claim some messages - try: - self._validate.claim_creation(metadata, limit=limit) - - cid, msgs = self._claim_controller.create( - queue_name, - metadata=metadata, - project=project_id, - **claim_options) - - # Buffer claimed messages - # TODO(kgriffs): optimize, along with serialization (below) - resp_msgs = list(msgs) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be created.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Serialize claimed messages, if any. This logic assumes - # the storage driver returned well-formed messages. 
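Because of the Content-Length check above, claiming with the configured defaults is just a bodiless POST. A client-side sketch (endpoint, host, and values illustrative; `requests` stands in for any HTTP client):

::

    import json

    import requests  # illustrative; any HTTP client works

    URL = 'http://localhost:8888/v1.1/queues/fizbat/claims'  # hypothetical endpoint

    # No body: the resource falls back to the preallocated default
    # ttl/grace metadata.
    requests.post(URL, params={'limit': 5})

    # An explicit body overrides the defaults.
    requests.post(URL, data=json.dumps({'ttl': 300, 'grace': 60}))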
- if len(resp_msgs) != 0: - base_path = req.path.rpartition('/')[0] - resp_msgs = [wsgi_utils.format_message_v1_1(msg, base_path, cid) - for msg in resp_msgs] - - resp.location = req.path + '/' + cid - resp.body = utils.to_json({'messages': resp_msgs}) - resp.status = falcon.HTTP_201 - else: - resp.status = falcon.HTTP_204 - - -class ItemResource(object): - - __slots__ = ('_claim_controller', '_validate', '_claim_patch_spec') - - def __init__(self, wsgi_conf, validate, claim_controller, - default_claim_ttl, default_grace_ttl): - self._claim_controller = claim_controller - self._validate = validate - - self._claim_patch_spec = ( - ('ttl', int, default_claim_ttl), - ('grace', int, default_grace_ttl), - ) - - @decorators.TransportLog("Claim item") - def on_get(self, req, resp, project_id, queue_name, claim_id): - try: - meta, msgs = self._claim_controller.get( - queue_name, - claim_id=claim_id, - project=project_id) - - # Buffer claimed messages - # TODO(kgriffs): Optimize along with serialization (see below) - meta['messages'] = list(msgs) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be queried.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Serialize claimed messages - # TODO(kgriffs): Optimize - base_path = req.path.rsplit('/', 2)[0] - meta['messages'] = [wsgi_utils.format_message_v1_1(msg, base_path, - claim_id) - for msg in meta['messages']] - - meta['href'] = req.path - del meta['id'] - - resp.body = utils.to_json(meta) - # status defaults to 200 - - @decorators.TransportLog("Claim item") - def on_patch(self, req, resp, project_id, queue_name, claim_id): - # Read claim metadata (e.g., TTL) and raise appropriate - # HTTP errors as needed. - document = wsgi_utils.deserialize(req.stream, req.content_length) - metadata = wsgi_utils.sanitize(document, self._claim_patch_spec) - - try: - self._validate.claim_updating(metadata) - self._claim_controller.update(queue_name, - claim_id=claim_id, - metadata=metadata, - project=project_id) - - resp.status = falcon.HTTP_204 - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be updated.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - @decorators.TransportLog("Claim item") - def on_delete(self, req, resp, project_id, queue_name, claim_id): - try: - self._claim_controller.delete(queue_name, - claim_id=claim_id, - project=project_id) - - resp.status = falcon.HTTP_204 - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) diff --git a/zaqar/transport/wsgi/v1_1/flavors.py b/zaqar/transport/wsgi/v1_1/flavors.py deleted file mode 100644 index 4b497c2e..00000000 --- a/zaqar/transport/wsgi/v1_1/flavors.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -import jsonschema -from oslo_log import log -import six - -from zaqar.common.api.schemas import flavors as schema -from zaqar.common import utils as common_utils -from zaqar.i18n import _ -from zaqar.storage import errors -from zaqar.transport import utils as transport_utils -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = log.getLogger(__name__) - - -class Listing(object): - """A resource to list registered flavors - - :param flavors_controller: means to interact with storage - """ - - def __init__(self, flavors_controller): - self._ctrl = flavors_controller - - def on_get(self, request, response, project_id): - """Returns a flavor listing as objects embedded in an object: - - :: - - { - "flavors": [ - {"href": "", "capabilities": {}, "pool": ""}, - ... - ], - "links": [ - {"rel": "next", "href": ""}, - ... - ] - } - - :returns: HTTP | 200 - """ - - LOG.debug(u'LIST flavors for project_id %s', project_id) - - store = {} - request.get_param('marker', store=store) - request.get_param_as_int('limit', store=store) - request.get_param_as_bool('detailed', store=store) - - cursor = self._ctrl.list(project=project_id, **store) - flavors = list(next(cursor)) - - results = {'links': []} - - if flavors: - store['marker'] = next(cursor) - - for entry in flavors: - entry['href'] = request.path + '/' + entry['name'] - # NOTE(wanghao): remove this in Newton. - entry['pool'] = entry['pool_group'] - - results['links'] = [ - { - 'rel': 'next', - 'href': request.path + falcon.to_query_str(store) - } - ] - - results['flavors'] = flavors - - response.body = transport_utils.to_json(results) - response.status = falcon.HTTP_200 - - -class Resource(object): - """A handler for individual flavor. - - :param flavors_controller: means to interact with storage - """ - - def __init__(self, flavors_controller): - self._ctrl = flavors_controller - validator_type = jsonschema.Draft4Validator - self._validators = { - 'create': validator_type(schema.create), - 'pool_group': validator_type(schema.patch_pool_group), - # NOTE(wanghao): Remove this in Newton. - 'pool': validator_type(schema.patch_pool), - 'capabilities': validator_type(schema.patch_capabilities), - } - - def on_get(self, request, response, project_id, flavor): - """Returns a JSON object for a single flavor entry: - - :: - - {"pool_group": "", capabilities: {...}} - - :returns: HTTP | [200, 404] - """ - - LOG.debug(u'GET flavor - name: %s', flavor) - data = None - detailed = request.get_param_as_bool('detailed') or False - - try: - data = self._ctrl.get(flavor, - project=project_id, - detailed=detailed) - # NOTE(wanghao): remove this in Newton. - data['pool'] = data['pool_group'] - except errors.FlavorDoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - data['href'] = request.path - - response.body = transport_utils.to_json(data) - - def on_put(self, request, response, project_id, flavor): - """Registers a new flavor. 
Expects the following input: - - :: - - {"pool_group": "my-pool-group", "capabilities": {}} - - A capabilities object may also be provided. - - :returns: HTTP | [201, 400] - """ - - LOG.debug(u'PUT flavor - name: %s', flavor) - - data = wsgi_utils.load(request) - wsgi_utils.validate(self._validators['create'], data) - pool_group = data.get('pool_group') or data.get('pool') - try: - self._ctrl.create(flavor, - pool_group=pool_group, - project=project_id, - capabilities=data['capabilities']) - response.status = falcon.HTTP_201 - response.location = request.path - except errors.PoolGroupDoesNotExist as ex: - LOG.exception(ex) - description = (_(u'Flavor %(flavor)s could not be created. ' - u'Pool group %(pool_group)s does not exist') % - dict(flavor=flavor, pool_group=pool_group)) - raise falcon.HTTPBadRequest(_('Unable to create'), description) - - def on_delete(self, request, response, project_id, flavor): - """Deregisters a flavor. - - :returns: HTTP | [204] - """ - - LOG.debug(u'DELETE flavor - name: %s', flavor) - self._ctrl.delete(flavor, project=project_id) - response.status = falcon.HTTP_204 - - def on_patch(self, request, response, project_id, flavor): - """Allows one to update a flavor's pool and/or capabilities. - - This method expects the user to submit a JSON object - containing at least one of: 'pool_group', 'capabilities', or - the deprecated 'pool' alias. If none are found, the request is - flagged as bad. There is also strict format checking through - the use of jsonschema. Appropriate errors are returned in each - case for badly formatted input. - - :returns: HTTP | [200, 400] - """ - - LOG.debug(u'PATCH flavor - name: %s', flavor) - data = wsgi_utils.load(request) - - EXPECT = ('pool_group', 'capabilities', 'pool') - if not any([(field in data) for field in EXPECT]): - LOG.debug(u'PATCH flavor, bad params') - raise wsgi_errors.HTTPBadRequestBody( - 'One of `pool_group` or `capabilities` or `pool` needs ' - 'to be specified' - ) - - for field in EXPECT: - wsgi_utils.validate(self._validators[field], data) - - fields = common_utils.fields(data, EXPECT, - pred=lambda v: v is not None) - # NOTE(wanghao): remove this in Newton. - if fields.get('pool') and fields.get('pool_group') is None: - fields['pool_group'] = fields.get('pool') - fields.pop('pool') - - try: - self._ctrl.update(flavor, project=project_id, **fields) - except errors.FlavorDoesNotExist as ex: - LOG.exception(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) diff --git a/zaqar/transport/wsgi/v1_1/health.py b/zaqar/transport/wsgi/v1_1/health.py deleted file mode 100644 index def3a3d1..00000000 --- a/zaqar/transport/wsgi/v1_1/health.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# Copyright 2014 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License.
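Until the deprecated alias is removed, the compatibility shim above makes these two flavor PATCH bodies equivalent (group name illustrative):

::

    {"pool": "poolgroup-a"}

    {"pool_group": "poolgroup-a"}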
- -from oslo_log import log as logging - -from zaqar.i18n import _ -from zaqar.transport import utils -from zaqar.transport.wsgi import errors as wsgi_errors - -LOG = logging.getLogger(__name__) - - -class Resource(object): - - __slots__ = ('_driver',) - - def __init__(self, driver): - self._driver = driver - - def on_get(self, req, resp, **kwargs): - try: - resp_dict = self._driver.health() - resp.body = utils.to_json(resp_dict) - except Exception as ex: - LOG.exception(ex) - description = _(u'Health status could not be read.') - raise wsgi_errors.HTTPServiceUnavailable(description) diff --git a/zaqar/transport/wsgi/v1_1/homedoc.py b/zaqar/transport/wsgi/v1_1/homedoc.py deleted file mode 100644 index d5f0c5a2..00000000 --- a/zaqar/transport/wsgi/v1_1/homedoc.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import json - - -# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03 -JSON_HOME = { - 'resources': { - # ----------------------------------------------------------------- - # Queues - # ----------------------------------------------------------------- - 'rel/queues': { - 'href-template': '/v1.1/queues{?marker,limit,detailed}', - 'href-vars': { - 'marker': 'param/marker', - 'limit': 'param/queue_limit', - 'detailed': 'param/detailed', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue': { - 'href-template': '/v1.1/queues/{queue_name}', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['PUT', 'DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue_stats': { - 'href-template': '/v1.1/queues/{queue_name}/stats', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - - # ----------------------------------------------------------------- - # Messages - # ----------------------------------------------------------------- - 'rel/messages': { - 'href-template': ('/v1.1/queues/{queue_name}/messages' - '{?marker,limit,echo,include_claimed}'), - 'href-vars': { - 'queue_name': 'param/queue_name', - 'marker': 'param/marker', - 'limit': 'param/messages_limit', - 'echo': 'param/echo', - 'include_claimed': 'param/include_claimed', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/post_messages': { - 'href-template': '/v1.1/queues/{queue_name}/messages', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'], - }, - }, - 'rel/messages_delete': { - 'href-template': '/v1.1/queues/{queue_name}/messages{?ids,pop}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'ids': 'param/ids', - 'pop': 'param/pop' - }, - 'hints': { - 'allow': [ - 'DELETE' - ], - 'formats': { - 'application/json': {} - } - } - }, - 'rel/message_delete': { - 
'href-template': '/v1.1/queues/{queue_name}/messages/{message_id}{?claim}', # noqa - 'href-vars': { - 'queue_name': 'param/queue_name', - 'message_id': 'param/message_id', - 'claim': 'param/claim_id' - }, - 'hints': { - 'allow': [ - 'DELETE' - ], - 'formats': { - 'application/json': {} - } - } - }, - - # ----------------------------------------------------------------- - # Claims - # ----------------------------------------------------------------- - 'rel/claim': { - 'href-template': '/v1.1/queues/{queue_name}/claims/{claim_id}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'claim_id': 'param/claim_id', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/post_claim': { - 'href-template': '/v1.1/queues/{queue_name}/claims{?limit}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'limit': 'param/claim_limit', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'] - }, - }, - 'rel/patch_claim': { - 'href-template': '/v1.1/queues/{queue_name}/claims/{claim_id}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'claim_id': 'param/claim_id', - }, - 'hints': { - 'allow': ['PATCH'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'] - }, - }, - 'rel/delete_claim': { - 'href-template': '/v1.1/queues/{queue_name}/claims/{claim_id}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'claim_id': 'param/claim_id', - }, - 'hints': { - 'allow': ['DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - # ----------------------------------------------------------------- - # Ping - # ----------------------------------------------------------------- - 'rel/ping': { - 'href-template': '/v1.1/ping', - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - } - } - } - } -} - - -ADMIN_RESOURCES = { - # ----------------------------------------------------------------- - # Pools - # ----------------------------------------------------------------- - 'rel/pools': { - 'href-template': '/v1.1/pools{?detailed,limit,marker}', - 'href-vars': { - 'detailed': 'param/detailed', - 'limit': 'param/pool_limit', - 'marker': 'param/marker', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/pool': { - 'href-template': '/v1.1/pools/{pool_name}', - 'href-vars': { - 'pool_name': 'param/pool_name', - }, - 'hints': { - 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - - # ----------------------------------------------------------------- - # Flavors - # ----------------------------------------------------------------- - 'rel/flavors': { - 'href-template': '/v1.1/flavors{?detailed,limit,marker}', - 'href-vars': { - 'detailed': 'param/detailed', - 'limit': 'param/flavor_limit', - 'marker': 'param/marker', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/flavor': { - 'href-template': '/v1.1/flavors/{flavor_name}', - 'href-vars': { - 'flavor_name': 'param/flavor_name', - }, - 'hints': { - 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - - # ----------------------------------------------------------------- - # Health - # ----------------------------------------------------------------- - 'rel/health': { - 'href': '/v1.1/health', - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, -} - - -class 
Resource(object): - - def __init__(self, conf): - if conf.admin_mode: - JSON_HOME['resources'].update(ADMIN_RESOURCES) - - document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4) - self.document_utf8 = document.encode('utf-8') - - def on_get(self, req, resp, project_id): - resp.data = self.document_utf8 - - resp.content_type = 'application/json-home' - resp.cache_control = ['max-age=86400'] - # status defaults to 200 diff --git a/zaqar/transport/wsgi/v1_1/messages.py b/zaqar/transport/wsgi/v1_1/messages.py deleted file mode 100644 index e140dd13..00000000 --- a/zaqar/transport/wsgi/v1_1/messages.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.common.transport.wsgi import helpers as wsgi_helpers -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - - -class CollectionResource(object): - - __slots__ = ( - '_message_controller', - '_queue_controller', - '_wsgi_conf', - '_validate', - '_message_post_spec', - ) - - def __init__(self, wsgi_conf, validate, - message_controller, queue_controller, - default_message_ttl): - - self._wsgi_conf = wsgi_conf - self._validate = validate - self._message_controller = message_controller - self._queue_controller = queue_controller - - self._message_post_spec = ( - ('ttl', int, default_message_ttl), - ('body', '*', None), - ) - - # ---------------------------------------------------------------------- - # Helpers - # ---------------------------------------------------------------------- - - def _get_by_id(self, base_path, project_id, queue_name, ids): - """Returns one or more messages from the queue by ID.""" - try: - self._validate.message_listing(limit=len(ids)) - messages = self._message_controller.bulk_get( - queue_name, - message_ids=ids, - project=project_id) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare response - messages = list(messages) - if not messages: - return None - - messages = [wsgi_utils.format_message_v1_1(m, base_path, m['claim_id']) - for m in messages] - - return {'messages': messages} - - def _get(self, req, project_id, queue_name): - client_uuid = wsgi_helpers.get_client_uuid(req) - kwargs = {} - - # NOTE(kgriffs): This syntax ensures that - # we don't clobber default values with None. 
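The store= idiom noted above relies on falcon writing into the supplied dict only when the query parameter is actually present, so absent parameters never shadow the storage layer's defaults with None. A self-contained sketch of the behavior (a simplified stand-in, not falcon itself):

::

    def get_param(name, raw, store=None):
        # Mimics falcon: assign into `store` only when a value exists.
        if raw is not None and store is not None:
            store[name] = raw
        return raw

    kwargs = {}
    get_param('limit', None, store=kwargs)  # absent -> kwargs stays {}
    get_param('limit', 10, store=kwargs)    # present -> {'limit': 10}
    print(kwargs)                           # {'limit': 10}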
- req.get_param('marker', store=kwargs) - req.get_param_as_int('limit', store=kwargs) - req.get_param_as_bool('echo', store=kwargs) - req.get_param_as_bool('include_claimed', store=kwargs) - - try: - self._validate.message_listing(**kwargs) - results = self._message_controller.list( - queue_name, - project=project_id, - client_uuid=client_uuid, - **kwargs) - - # Buffer messages - cursor = next(results) - messages = list(cursor) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.QueueDoesNotExist as ex: - LOG.debug(ex) - messages = None - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be listed.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - if not messages: - messages = [] - - else: - # Found some messages, so prepare the response - kwargs['marker'] = next(results) - base_path = req.path.rsplit('/', 1)[0] - messages = [wsgi_utils.format_message_v1_1(m, base_path, - m['claim_id']) - for m in messages] - - links = [] - if messages: - links = [ - { - 'rel': 'next', - 'href': req.path + falcon.to_query_str(kwargs) - } - ] - - return { - 'messages': messages, - 'links': links - } - - # ---------------------------------------------------------------------- - # Interface - # ---------------------------------------------------------------------- - - @decorators.TransportLog("Messages collection") - def on_post(self, req, resp, project_id, queue_name): - client_uuid = wsgi_helpers.get_client_uuid(req) - - try: - # Place JSON size restriction before parsing - self._validate.message_length(req.content_length) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - # Deserialize and validate the incoming messages - document = wsgi_utils.deserialize(req.stream, req.content_length) - - if 'messages' not in document: - description = _(u'No messages were found in the request body.') - raise wsgi_errors.HTTPBadRequestAPI(description) - - messages = wsgi_utils.sanitize(document['messages'], - self._message_post_spec, - doctype=wsgi_utils.JSONArray) - - try: - self._validate.message_posting(messages) - - if not self._queue_controller.exists(queue_name, project_id): - self._queue_controller.create(queue_name, project=project_id) - - message_ids = self._message_controller.post( - queue_name, - messages=messages, - project=project_id, - client_uuid=client_uuid) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except storage_errors.MessageConflict as ex: - LOG.exception(ex) - description = _(u'No messages could be enqueued.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be enqueued.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare the response - ids_value = ','.join(message_ids) - resp.location = req.path + '?ids=' + ids_value - - hrefs = [req.path + '/' + id for id in message_ids] - body = {'resources': hrefs} - resp.body = utils.to_json(body) - resp.status = falcon.HTTP_201 - - @decorators.TransportLog("Messages collection") - def on_get(self, req, resp, project_id, queue_name): - ids = req.get_param_as_list('ids') - - if ids is None: - response = self._get(req, project_id, queue_name) - - else: - 
response = self._get_by_id(req.path.rsplit('/', 1)[0], project_id, - queue_name, ids) - - if response is None: - # NOTE(TheSriram): Trying to get a message by id, should - # return the message if its present, otherwise a 404 since - # the message might have been deleted. - msg = _(u'No messages with IDs: {ids} found in the queue {queue} ' - u'for project {project}.') - description = msg.format(queue=queue_name, project=project_id, - ids=ids) - raise wsgi_errors.HTTPNotFound(description) - - else: - resp.body = utils.to_json(response) - # status defaults to 200 - - @decorators.TransportLog("Messages collection") - def on_delete(self, req, resp, project_id, queue_name): - ids = req.get_param_as_list('ids') - pop_limit = req.get_param_as_int('pop') - try: - self._validate.message_deletion(ids, pop_limit) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - if ids: - resp.status = self._delete_messages_by_id(queue_name, ids, - project_id) - - elif pop_limit: - resp.status, resp.body = self._pop_messages(queue_name, - project_id, - pop_limit) - - def _delete_messages_by_id(self, queue_name, ids, project_id): - try: - self._message_controller.bulk_delete( - queue_name, - message_ids=ids, - project=project_id) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - return falcon.HTTP_204 - - def _pop_messages(self, queue_name, project_id, pop_limit): - try: - LOG.debug(u'POP messages - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - messages = self._message_controller.pop( - queue_name, - project=project_id, - limit=pop_limit) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be popped.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare response - if not messages: - messages = [] - body = {'messages': messages} - body = utils.to_json(body) - - return falcon.HTTP_200, body - - -class ItemResource(object): - - __slots__ = '_message_controller' - - def __init__(self, message_controller): - self._message_controller = message_controller - - @decorators.TransportLog("Messages item") - def on_get(self, req, resp, project_id, queue_name, message_id): - try: - message = self._message_controller.get( - queue_name, - message_id, - project=project_id) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare response - message['href'] = req.path - message = wsgi_utils.format_message_v1_1(message, - req.path.rsplit('/', 2)[0], - message['claim_id']) - - resp.body = utils.to_json(message) - # status defaults to 200 - - @decorators.TransportLog("Messages item") - def on_delete(self, req, resp, project_id, queue_name, message_id): - error_title = _(u'Unable to delete') - - try: - self._message_controller.delete( - queue_name, - message_id=message_id, - project=project_id, - claim=req.get_param('claim_id')) - - except storage_errors.MessageNotClaimed as ex: - LOG.debug(ex) - description = _(u'A claim was specified, but the message ' - u'is not currently claimed.') - raise falcon.HTTPBadRequest(error_title, description) - - except storage_errors.ClaimDoesNotExist as ex: - LOG.debug(ex) - description = _(u'The 
specified claim does not exist or ' - u'has expired.') - raise falcon.HTTPBadRequest(error_title, description) - - except storage_errors.NotPermitted as ex: - LOG.debug(ex) - description = _(u'This message is claimed; it cannot be ' - u'deleted without a valid claim ID.') - raise falcon.HTTPForbidden(error_title, description) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # All good - resp.status = falcon.HTTP_204 diff --git a/zaqar/transport/wsgi/v1_1/ping.py b/zaqar/transport/wsgi/v1_1/ping.py deleted file mode 100644 index 1ea34428..00000000 --- a/zaqar/transport/wsgi/v1_1/ping.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2014 IBM Corp. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import falcon - - -class Resource(object): - - __slots__ = ('_driver',) - - def __init__(self, driver): - self._driver = driver - - def on_get(self, req, resp, **kwargs): - resp.status = (falcon.HTTP_204 if self._driver.is_alive() - else falcon.HTTP_503) - - def on_head(self, req, resp, **kwargs): - resp.status = falcon.HTTP_204 diff --git a/zaqar/transport/wsgi/v1_1/pools.py b/zaqar/transport/wsgi/v1_1/pools.py deleted file mode 100644 index ea6e3fb3..00000000 --- a/zaqar/transport/wsgi/v1_1/pools.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""pools: a resource to handle storage pool management - -A pool is added by an operator by interacting with the -pooling-related endpoints.
When specifying a pool, the -following fields are required: - -:: - - { - "name": string, - "weight": integer, - "uri": string::uri - } - -Furthermore, depending on the underlying storage type of pool being -registered, there is an optional field: -:: - - { - "options": {...} - } -""" - -import falcon -import jsonschema -from oslo_log import log -import six - -from zaqar.common.api.schemas import pools as schema -from zaqar.common import utils as common_utils -from zaqar.i18n import _ -from zaqar.storage import errors -from zaqar.storage import utils as storage_utils -from zaqar.transport import utils as transport_utils -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = log.getLogger(__name__) - - -class Listing(object): - """A resource to list registered pools - - :param pools_controller: means to interact with storage - """ - - def __init__(self, pools_controller): - self._ctrl = pools_controller - - def on_get(self, request, response, project_id): - """Returns a pool listing as objects embedded in an object: - - :: - - { - "pools": [ - {"href": "", "weight": 100, "uri": ""}, - ... - ], - "links": [ - {"href": "", "rel": "next"} - ] - } - - :returns: HTTP | 200 - """ - - LOG.debug(u'LIST pools') - - store = {} - request.get_param('marker', store=store) - request.get_param_as_int('limit', store=store) - request.get_param_as_bool('detailed', store=store) - - cursor = self._ctrl.list(**store) - pools = list(next(cursor)) - - results = {'links': []} - - if pools: - store['marker'] = next(cursor) - - for entry in pools: - entry['href'] = request.path + '/' + entry['name'] - - results['links'] = [ - { - 'rel': 'next', - 'href': request.path + falcon.to_query_str(store) - } - ] - - results['pools'] = pools - - response.content_location = request.relative_uri - response.body = transport_utils.to_json(results) - response.status = falcon.HTTP_200 - - -class Resource(object): - """A handler for individual pool. - - :param pools_controller: means to interact with storage - """ - - def __init__(self, pools_controller): - self._ctrl = pools_controller - validator_type = jsonschema.Draft4Validator - self._validators = { - 'weight': validator_type(schema.patch_weight), - 'uri': validator_type(schema.patch_uri), - 'group': validator_type(schema.patch_uri), - 'options': validator_type(schema.patch_options), - 'create': validator_type(schema.create) - } - - def on_get(self, request, response, project_id, pool): - """Returns a JSON object for a single pool entry: - - :: - - {"weight": 100, "uri": "", options: {...}} - - :returns: HTTP | [200, 404] - """ - - LOG.debug(u'GET pool - name: %s', pool) - data = None - detailed = request.get_param_as_bool('detailed') or False - - try: - data = self._ctrl.get(pool, detailed) - - except errors.PoolDoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - data['href'] = request.path - - response.body = transport_utils.to_json(data) - - def on_put(self, request, response, project_id, pool): - """Registers a new pool. Expects the following input: - - :: - - {"weight": 100, "uri": ""} - - An options object may also be provided. 
- - :returns: HTTP | [201, 204] - """ - - LOG.debug(u'PUT pool - name: %s', pool) - - conf = self._ctrl.driver.conf - data = wsgi_utils.load(request) - wsgi_utils.validate(self._validators['create'], data) - if not storage_utils.can_connect(data['uri'], conf=conf): - raise wsgi_errors.HTTPBadRequestBody( - 'cannot connect to %s' % data['uri'] - ) - try: - self._ctrl.create(pool, weight=data['weight'], - uri=data['uri'], - group=data.get('group'), - options=data.get('options', {})) - response.status = falcon.HTTP_201 - response.location = request.path - except errors.PoolCapabilitiesMismatch as e: - LOG.exception(e) - title = _(u'Unable to create pool') - raise falcon.HTTPBadRequest(title, six.text_type(e)) - except errors.PoolAlreadyExists as e: - LOG.exception(e) - raise wsgi_errors.HTTPConflict(six.text_type(e)) - - def on_delete(self, request, response, project_id, pool): - """Deregisters a pool. - - :returns: HTTP | [204, 403] - """ - - LOG.debug(u'DELETE pool - name: %s', pool) - - try: - self._ctrl.delete(pool) - except errors.PoolInUseByFlavor as ex: - LOG.exception(ex) - title = _(u'Unable to delete') - description = _(u'This pool is used by flavors {flavor}; ' - u'it cannot be deleted.') - description = description.format(flavor=ex.flavor) - raise falcon.HTTPForbidden(title, description) - - response.status = falcon.HTTP_204 - - def on_patch(self, request, response, project_id, pool): - """Allows one to update a pool's weight, uri, and/or options. - - This method expects the user to submit a JSON object - containing at least one of: 'uri', 'weight', 'group', 'options'. If - none are found, the request is flagged as bad. There is also - strict format checking through the use of - jsonschema. Appropriate errors are returned in each case for - badly formatted input. - - :returns: HTTP | [200, 400] - """ - - LOG.debug(u'PATCH pool - name: %s', pool) - data = wsgi_utils.load(request) - - EXPECT = ('weight', 'uri', 'group', 'options') - if not any([(field in data) for field in EXPECT]): - LOG.debug(u'PATCH pool, bad params') - raise wsgi_errors.HTTPBadRequestBody( - 'One of `uri`, `weight`, `group`, or `options` needs ' - 'to be specified' - ) - - for field in EXPECT: - wsgi_utils.validate(self._validators[field], data) - - conf = self._ctrl.driver.conf - if 'uri' in data and not storage_utils.can_connect(data['uri'], - conf=conf): - raise wsgi_errors.HTTPBadRequestBody( - 'cannot connect to %s' % data['uri'] - ) - fields = common_utils.fields(data, EXPECT, - pred=lambda v: v is not None) - - try: - self._ctrl.update(pool, **fields) - except errors.PoolDoesNotExist as ex: - LOG.exception(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) diff --git a/zaqar/transport/wsgi/v1_1/queues.py b/zaqar/transport/wsgi/v1_1/queues.py deleted file mode 100644 index e61c4808..00000000 --- a/zaqar/transport/wsgi/v1_1/queues.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
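Putting the optional pieces together, a v1.1 pool registration body per the create path above may carry a group and options alongside the required fields (all values illustrative; the options content is backend-specific):

::

    {
        "weight": 100,
        "uri": "mongodb://127.0.0.1:27017",
        "group": "poolgroup-a",
        "options": {}
    }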
- -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - - -LOG = logging.getLogger(__name__) - - -class ItemResource(object): - - __slots__ = ('_validate', '_queue_controller', '_message_controller') - - def __init__(self, validate, queue_controller, message_controller): - self._validate = validate - self._queue_controller = queue_controller - self._message_controller = message_controller - - @decorators.TransportLog("Queue metadata") - def on_get(self, req, resp, project_id, queue_name): - try: - resp_dict = self._queue_controller.get(queue_name, - project=project_id) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue metadata could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.body = utils.to_json(resp_dict) - # status defaults to 200 - - @decorators.TransportLog("Queue item") - def on_put(self, req, resp, project_id, queue_name): - try: - # Place JSON size restriction before parsing - self._validate.queue_metadata_length(req.content_length) - # Deserialize queue metadata - metadata = None - if req.content_length: - document = wsgi_utils.deserialize(req.stream, - req.content_length) - metadata = wsgi_utils.sanitize(document) - # NOTE(Eva-i): reserved queue attributes is Zaqar's feature since - # API v2. But we have to ensure the bad data will not come from - # older APIs, so we validate metadata here. - self._validate.queue_metadata_putting(metadata) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - try: - created = self._queue_controller.create(queue_name, - metadata=metadata, - project=project_id) - - except storage_errors.FlavorDoesNotExist as ex: - LOG.exception(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue could not be created.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_201 if created else falcon.HTTP_204 - resp.location = req.path - - @decorators.TransportLog("Queue item") - def on_delete(self, req, resp, project_id, queue_name): - try: - self._queue_controller.delete(queue_name, project=project_id) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_204 - - -class CollectionResource(object): - - __slots__ = ('_queue_controller', '_validate') - - def __init__(self, validate, queue_controller): - self._queue_controller = queue_controller - self._validate = validate - - @decorators.TransportLog("Queue collection") - def on_get(self, req, resp, project_id): - kwargs = {} - - # NOTE(kgriffs): This syntax ensures that - # we don't clobber default values with None. 
- req.get_param('marker', store=kwargs) - req.get_param_as_int('limit', store=kwargs) - req.get_param_as_bool('detailed', store=kwargs) - - try: - self._validate.queue_listing(**kwargs) - results = self._queue_controller.list(project=project_id, **kwargs) - - # Buffer list of queues - queues = list(next(results)) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queues could not be listed.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Got some. Prepare the response. - kwargs['marker'] = next(results) or kwargs.get('marker', '') - for each_queue in queues: - each_queue['href'] = req.path + '/' + each_queue['name'] - - links = [] - if queues: - links = [ - { - 'rel': 'next', - 'href': req.path + falcon.to_query_str(kwargs) - } - ] - - response_body = { - 'queues': queues, - 'links': links - } - - resp.body = utils.to_json(response_body) - # status defaults to 200 diff --git a/zaqar/transport/wsgi/v1_1/stats.py b/zaqar/transport/wsgi/v1_1/stats.py deleted file mode 100644 index f816072e..00000000 --- a/zaqar/transport/wsgi/v1_1/stats.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging -import six - -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import utils -from zaqar.transport.wsgi import errors as wsgi_errors - - -LOG = logging.getLogger(__name__) - - -class Resource(object): - - __slots__ = '_queue_ctrl' - - def __init__(self, queue_controller): - self._queue_ctrl = queue_controller - - def on_get(self, req, resp, project_id, queue_name): - try: - resp_dict = self._queue_ctrl.stats(queue_name, - project=project_id) - - message_stats = resp_dict['messages'] - - if message_stats['total'] != 0: - base_path = req.path[:req.path.rindex('/')] + '/messages/' - - newest = message_stats['newest'] - newest['href'] = base_path + newest['id'] - del newest['id'] - - oldest = message_stats['oldest'] - oldest['href'] = base_path + oldest['id'] - del oldest['id'] - - resp.body = utils.to_json(resp_dict) - # status defaults to 200 - - except (storage_errors.QueueDoesNotExist, - storage_errors.QueueIsEmpty) as ex: - resp_dict = { - 'messages': { - 'claimed': 0, - 'free': 0, - 'total': 0 - } - } - resp.body = utils.to_json(resp_dict) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue stats could not be read.') - raise wsgi_errors.HTTPServiceUnavailable(description) diff --git a/zaqar/transport/wsgi/v2_0/__init__.py b/zaqar/transport/wsgi/v2_0/__init__.py deleted file mode 100644 index cc4b5b0c..00000000 --- a/zaqar/transport/wsgi/v2_0/__init__.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. 
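Note the v1.1 stats behavior above: both a nonexistent and an empty queue yield zeroed counters rather than an error, collapsing the response to:

::

    {
        "messages": {
            "claimed": 0,
            "free": 0,
            "total": 0
        }
    }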
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -from zaqar.common import decorators -from zaqar.transport.wsgi.v2_0 import claims -from zaqar.transport.wsgi.v2_0 import flavors -from zaqar.transport.wsgi.v2_0 import health -from zaqar.transport.wsgi.v2_0 import homedoc -from zaqar.transport.wsgi.v2_0 import messages -from zaqar.transport.wsgi.v2_0 import ping -from zaqar.transport.wsgi.v2_0 import pools -from zaqar.transport.wsgi.v2_0 import purge -from zaqar.transport.wsgi.v2_0 import queues -from zaqar.transport.wsgi.v2_0 import stats -from zaqar.transport.wsgi.v2_0 import subscriptions -from zaqar.transport.wsgi.v2_0 import urls - - -VERSION = { - 'id': '2', - 'status': 'CURRENT', - 'updated': '2014-9-24T04:06:47Z', - 'media-types': [ - { - 'base': 'application/json', - 'type': 'application/vnd.openstack.messaging-v2+json' - } - ], - 'links': [ - { - 'href': '/v2/', - 'rel': 'self' - } - ] -} - - -@decorators.api_version_manager(VERSION) -def public_endpoints(driver, conf): - queue_controller = driver._storage.queue_controller - message_controller = driver._storage.message_controller - claim_controller = driver._storage.claim_controller - subscription_controller = driver._storage.subscription_controller - - defaults = driver._defaults - - return [ - # Home - ('/', - homedoc.Resource(conf)), - - # Queues Endpoints - ('/queues', - queues.CollectionResource(driver._validate, - queue_controller)), - ('/queues/{queue_name}', - queues.ItemResource(driver._validate, - queue_controller, - message_controller)), - ('/queues/{queue_name}/stats', - stats.Resource(queue_controller)), - ('/queues/{queue_name}/purge', - purge.Resource(driver)), - # Messages Endpoints - ('/queues/{queue_name}/messages', - messages.CollectionResource(driver._wsgi_conf, - driver._validate, - message_controller, - queue_controller, - defaults.message_ttl)), - ('/queues/{queue_name}/messages/{message_id}', - messages.ItemResource(message_controller)), - - # Claims Endpoints - ('/queues/{queue_name}/claims', - claims.CollectionResource(driver._wsgi_conf, - driver._validate, - claim_controller, - defaults.claim_ttl, - defaults.claim_grace)), - ('/queues/{queue_name}/claims/{claim_id}', - claims.ItemResource(driver._wsgi_conf, - driver._validate, - claim_controller, - defaults.claim_ttl, - defaults.claim_grace)), - - # Ping - ('/ping', - ping.Resource(driver._storage)), - - # Subscription Endpoints - ('/queues/{queue_name}/subscriptions', - subscriptions.CollectionResource(driver._validate, - subscription_controller, - defaults.subscription_ttl, - queue_controller, - conf)), - - ('/queues/{queue_name}/subscriptions/{subscription_id}', - subscriptions.ItemResource(driver._validate, - subscription_controller)), - - ('/queues/{queue_name}/subscriptions/{subscription_id}/confirm', - subscriptions.ConfirmResource(driver._validate, - subscription_controller, - conf)), - - # Pre-Signed URL Endpoint - ('/queues/{queue_name}/share', urls.Resource(driver)), - ] - - -@decorators.api_version_manager(VERSION) -def private_endpoints(driver, 
conf): - - catalogue = [ - # Health - ('/health', - health.Resource(driver._storage)), - ] - - if conf.pooling: - pools_controller = driver._control.pools_controller - flavors_controller = driver._control.flavors_controller - validate = driver._validate - - catalogue.extend([ - ('/pools', - pools.Listing(pools_controller, validate)), - ('/pools/{pool}', - pools.Resource(pools_controller)), - ('/flavors', - flavors.Listing(flavors_controller, pools_controller, - validate)), - ('/flavors/{flavor}', - flavors.Resource(flavors_controller, pools_controller)), - ]) - - return catalogue diff --git a/zaqar/transport/wsgi/v2_0/claims.py b/zaqar/transport/wsgi/v2_0/claims.py deleted file mode 100644 index c6df69a0..00000000 --- a/zaqar/transport/wsgi/v2_0/claims.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import acl -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - - -class CollectionResource(object): - __slots__ = ( - '_claim_controller', - '_validate', - '_claim_post_spec', - '_default_meta', - ) - - def __init__(self, wsgi_conf, validate, claim_controller, - default_claim_ttl, default_grace_ttl): - - self._claim_controller = claim_controller - self._validate = validate - - self._claim_post_spec = ( - ('ttl', int, default_claim_ttl), - ('grace', int, default_grace_ttl), - ) - - # NOTE(kgriffs): Create this once up front, rather than creating - # a new dict every time, for the sake of performance. - self._default_meta = { - 'ttl': default_claim_ttl, - 'grace': default_grace_ttl, - } - - @decorators.TransportLog("Claims collection") - @acl.enforce("claims:create") - def on_post(self, req, resp, project_id, queue_name): - # Check for an explicit limit on the # of messages to claim - limit = req.get_param_as_int('limit') - claim_options = {} if limit is None else {'limit': limit} - - # NOTE(kgriffs): Clients may or may not actually include the - # Content-Length header when the body is empty; the following - # check works for both 0 and None. - if not req.content_length: - # No values given, so use defaults - metadata = self._default_meta - else: - # Read claim metadata (e.g., TTL) and raise appropriate - # HTTP errors as needed. 
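The _claim_post_spec tuples defined earlier pair each field with a type
and a default; the sanitize step they feed reduces, in essence, to the
following sketch (a simplification, not the actual wsgi_utils code)::

    # Sketch: apply a ('field', type, default) spec to a parsed JSON
    # document, the way the claim resources handle ttl and grace.
    def apply_spec(document, spec):
        out = {}
        for name, cast, default in spec:
            out[name] = cast(document.get(name, default))
        return out

    claim_post_spec = (('ttl', int, 300), ('grace', int, 60))
    print(apply_spec({'ttl': '120'}, claim_post_spec))
    # -> {'ttl': 120, 'grace': 60}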
- document = wsgi_utils.deserialize(req.stream, req.content_length) - metadata = wsgi_utils.sanitize(document, self._claim_post_spec) - - # Claim some messages - try: - self._validate.claim_creation(metadata, limit=limit) - - cid, msgs = self._claim_controller.create( - queue_name, - metadata=metadata, - project=project_id, - **claim_options) - - # Buffer claimed messages - # TODO(kgriffs): optimize, along with serialization (below) - resp_msgs = list(msgs) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be created.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Serialize claimed messages, if any. This logic assumes - # the storage driver returned well-formed messages. - if len(resp_msgs) != 0: - base_path = req.path.rpartition('/')[0] - resp_msgs = [wsgi_utils.format_message_v1_1(msg, base_path, cid) - for msg in resp_msgs] - - resp.location = req.path + '/' + cid - resp.body = utils.to_json({'messages': resp_msgs}) - resp.status = falcon.HTTP_201 - else: - resp.status = falcon.HTTP_204 - - -class ItemResource(object): - - __slots__ = ('_claim_controller', '_validate', '_claim_patch_spec') - - def __init__(self, wsgi_conf, validate, claim_controller, - default_claim_ttl, default_grace_ttl): - self._claim_controller = claim_controller - self._validate = validate - - self._claim_patch_spec = ( - ('ttl', int, default_claim_ttl), - ('grace', int, default_grace_ttl), - ) - - @decorators.TransportLog("Claims item") - @acl.enforce("claims:get") - def on_get(self, req, resp, project_id, queue_name, claim_id): - try: - meta, msgs = self._claim_controller.get( - queue_name, - claim_id=claim_id, - project=project_id) - - # Buffer claimed messages - # TODO(kgriffs): Optimize along with serialization (see below) - meta['messages'] = list(msgs) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be queried.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Serialize claimed messages - # TODO(kgriffs): Optimize - base_path = req.path.rsplit('/', 2)[0] - meta['messages'] = [wsgi_utils.format_message_v1_1(msg, base_path, - claim_id) - for msg in meta['messages']] - - meta['href'] = req.path - del meta['id'] - - resp.body = utils.to_json(meta) - # status defaults to 200 - - @decorators.TransportLog("Claims item") - @acl.enforce("claims:update") - def on_patch(self, req, resp, project_id, queue_name, claim_id): - # Read claim metadata (e.g., TTL) and raise appropriate - # HTTP errors as needed. 
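On the wire, the claim renewal handled below looks like this (host,
queue, claim id, and TTL values are invented; only the endpoint shape
comes from the routing table earlier in this patch)::

    # Sketch: renew a claim's TTL via the v2 API; 204 means updated.
    import json
    import urllib.request

    url = ('http://localhost:8888/v2/queues/demo/claims/'
           '51db7067821e727dc24df754')
    body = json.dumps({'ttl': 300, 'grace': 60}).encode('utf-8')
    req = urllib.request.Request(
        url, data=body, method='PATCH',
        headers={'Content-Type': 'application/json',
                 'Client-ID': '3381af92-2b9e-11e3-b191-71861300734c'})
    with urllib.request.urlopen(req) as resp:
        print(resp.status)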
- document = wsgi_utils.deserialize(req.stream, req.content_length) - metadata = wsgi_utils.sanitize(document, self._claim_patch_spec) - - try: - self._validate.claim_updating(metadata) - self._claim_controller.update(queue_name, - claim_id=claim_id, - metadata=metadata, - project=project_id) - - resp.status = falcon.HTTP_204 - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be updated.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - @decorators.TransportLog("Claims item") - @acl.enforce("claims:delete") - def on_delete(self, req, resp, project_id, queue_name, claim_id): - try: - self._claim_controller.delete(queue_name, - claim_id=claim_id, - project=project_id) - - resp.status = falcon.HTTP_204 - - except Exception as ex: - LOG.exception(ex) - description = _(u'Claim could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) diff --git a/zaqar/transport/wsgi/v2_0/flavors.py b/zaqar/transport/wsgi/v2_0/flavors.py deleted file mode 100644 index 92f3654d..00000000 --- a/zaqar/transport/wsgi/v2_0/flavors.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -import jsonschema -from oslo_log import log -import six - -from zaqar.common.api.schemas import flavors as schema -from zaqar.common import decorators -from zaqar.common import utils as common_utils -from zaqar.i18n import _ -from zaqar.storage import errors -from zaqar.transport import acl -from zaqar.transport import utils as transport_utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = log.getLogger(__name__) - - -class Listing(object): - """A resource to list registered flavors - - :param flavors_controller: means to interact with storage - """ - - def __init__(self, flavors_controller, pools_controller, validate): - self._ctrl = flavors_controller - self._pools_ctrl = pools_controller - self._validate = validate - - @decorators.TransportLog("Flavors collection") - @acl.enforce("flavors:get_all") - def on_get(self, request, response, project_id): - """Returns a flavor listing as objects embedded in an object: - - :: - - { - "flavors": [ - {"href": "", "capabilities": {}, "pool_group": ""}, - ... - ], - "links": [ - {"rel": "next", "href": ""}, - ... 
- ] - } - - :returns: HTTP | 200 - """ - - LOG.debug(u'LIST flavors for project_id %s', project_id) - - store = {} - request.get_param('marker', store=store) - request.get_param_as_int('limit', store=store) - detailed = request.get_param_as_bool('detailed') - - try: - self._validate.flavor_listing(**store) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - cursor = self._ctrl.list(project=project_id, **store) - flavors = list(next(cursor)) - - results = {'links': []} - - if flavors: - store['marker'] = next(cursor) - - for entry in flavors: - entry['href'] = request.path + '/' + entry['name'] - pool_group = entry['pool_group'] - # NOTE(wanghao): remove this in Newton. - entry['pool'] = entry['pool_group'] - if detailed: - caps = self._pools_ctrl.capabilities(group=pool_group) - entry['capabilities'] = [str(cap).split('.')[-1] - for cap in caps] - - if detailed is not None: - store['detailed'] = detailed - - if flavors: - results['links'] = [ - { - 'rel': 'next', - 'href': request.path + falcon.to_query_str(store) - } - ] - - results['flavors'] = flavors - - response.body = transport_utils.to_json(results) - response.status = falcon.HTTP_200 - - -class Resource(object): - """A handler for individual flavor. - - :param flavors_controller: means to interact with storage - """ - - def __init__(self, flavors_controller, pools_controller): - self._ctrl = flavors_controller - self._pools_ctrl = pools_controller - - validator_type = jsonschema.Draft4Validator - self._validators = { - 'create': validator_type(schema.create), - 'pool_group': validator_type(schema.patch_pool_group), - # NOTE(wanghao): Remove this in Newton. - 'pool': validator_type(schema.patch_pool), - 'capabilities': validator_type(schema.patch_capabilities), - } - - @decorators.TransportLog("Flavors item") - @acl.enforce("flavors:get") - def on_get(self, request, response, project_id, flavor): - """Returns a JSON object for a single flavor entry: - - :: - - {"pool": "", capabilities: {...}} - - :returns: HTTP | [200, 404] - """ - - LOG.debug(u'GET flavor - name: %s', flavor) - data = None - - try: - data = self._ctrl.get(flavor, project=project_id) - pool_group = data['pool_group'] - # NOTE(wanghao): remove this in Newton. - data['pool'] = data['pool_group'] - capabilities = self._pools_ctrl.capabilities(group=pool_group) - data['capabilities'] = [str(cap).split('.')[-1] - for cap in capabilities] - - except errors.FlavorDoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - data['href'] = request.path - - response.body = transport_utils.to_json(data) - - @decorators.TransportLog("Flavors item") - @acl.enforce("flavors:create") - def on_put(self, request, response, project_id, flavor): - """Registers a new flavor. Expects the following input: - - :: - - {"pool_group": "my-pool-group", "capabilities": {}} - - A capabilities object may also be provided. 
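As a concrete example of the payload this handler accepts (host, flavor
name, and pool group are invented, and an unauthenticated development
deployment is assumed)::

    # Sketch: register a flavor against an existing pool group.
    import json
    import urllib.request

    body = json.dumps({'pool_group': 'my-pool-group',
                       'capabilities': {}}).encode('utf-8')
    req = urllib.request.Request(
        'http://localhost:8888/v2/flavors/gold', data=body, method='PUT',
        headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as resp:
        print(resp.status)  # 201 when the flavor is created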
- - :returns: HTTP | [201, 400] - """ - - LOG.debug(u'PUT flavor - name: %s', flavor) - - data = wsgi_utils.load(request) - wsgi_utils.validate(self._validators['create'], data) - pool_group = data.get('pool_group') or data.get('pool') - capabilities = self._pools_ctrl.capabilities(pool_group) - try: - self._ctrl.create(flavor, - pool_group=pool_group, - project=project_id, - capabilities=capabilities) - response.status = falcon.HTTP_201 - response.location = request.path - except errors.PoolGroupDoesNotExist as ex: - LOG.exception(ex) - description = (_(u'Flavor %(flavor)s could not be created. ' - u'Pool group %(pool_group)s does not exist') % - dict(flavor=flavor, pool_group=pool_group)) - raise falcon.HTTPBadRequest(_('Unable to create'), description) - - @decorators.TransportLog("Flavors item") - @acl.enforce("flavors:delete") - def on_delete(self, request, response, project_id, flavor): - """Deregisters a flavor. - - :returns: HTTP | [204] - """ - - LOG.debug(u'DELETE flavor - name: %s', flavor) - self._ctrl.delete(flavor, project=project_id) - response.status = falcon.HTTP_204 - - @decorators.TransportLog("Flavors item") - @acl.enforce("flavors:update") - def on_patch(self, request, response, project_id, flavor): - """Allows one to update a flavors's pool_group. - - This method expects the user to submit a JSON object - containing 'pool_group'. If none is found, the request is flagged - as bad. There is also strict format checking through the use of - jsonschema. Appropriate errors are returned in each case for - badly formatted input. - - :returns: HTTP | [200, 400] - """ - - LOG.debug(u'PATCH flavor - name: %s', flavor) - data = wsgi_utils.load(request) - - EXPECT = ('pool_group', 'pool') - if not any([(field in data) for field in EXPECT]): - LOG.debug(u'PATCH flavor, bad params') - raise wsgi_errors.HTTPBadRequestBody( - '`pool_group` or `pool` needs to be specified' - ) - - for field in EXPECT: - wsgi_utils.validate(self._validators[field], data) - - fields = common_utils.fields(data, EXPECT, - pred=lambda v: v is not None) - # NOTE(wanghao): remove this in Newton. - if fields.get('pool') and fields.get('pool_group') is None: - fields['pool_group'] = fields.get('pool') - fields.pop('pool') - - resp_data = None - try: - self._ctrl.update(flavor, project=project_id, **fields) - resp_data = self._ctrl.get(flavor, project=project_id) - capabilities = self._pools_ctrl.capabilities( - group=resp_data['pool_group']) - resp_data['capabilities'] = [str(cap).split('.')[-1] - for cap in capabilities] - except errors.FlavorDoesNotExist as ex: - LOG.exception(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - resp_data['href'] = request.path - response.body = transport_utils.to_json(resp_data) diff --git a/zaqar/transport/wsgi/v2_0/health.py b/zaqar/transport/wsgi/v2_0/health.py deleted file mode 100644 index 26790600..00000000 --- a/zaqar/transport/wsgi/v2_0/health.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2014 Rackspace, Inc. -# Copyright 2014 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations under -# the License. - -from oslo_log import log as logging - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.transport import acl -from zaqar.transport import utils -from zaqar.transport.wsgi import errors as wsgi_errors - -LOG = logging.getLogger(__name__) - - -class Resource(object): - - __slots__ = ('_driver',) - - def __init__(self, driver): - self._driver = driver - - @decorators.TransportLog("Health item") - @acl.enforce("health:get") - def on_get(self, req, resp, **kwargs): - try: - resp_dict = self._driver.health() - resp.body = utils.to_json(resp_dict) - except Exception as ex: - LOG.exception(ex) - description = _(u'Health status could not be read.') - raise wsgi_errors.HTTPServiceUnavailable(description) diff --git a/zaqar/transport/wsgi/v2_0/homedoc.py b/zaqar/transport/wsgi/v2_0/homedoc.py deleted file mode 100644 index f019a2fa..00000000 --- a/zaqar/transport/wsgi/v2_0/homedoc.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -import json - - -# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03 -JSON_HOME = { - 'resources': { - # ----------------------------------------------------------------- - # Queues - # ----------------------------------------------------------------- - 'rel/queues': { - 'href-template': '/v2/queues{?marker,limit,detailed}', - 'href-vars': { - 'marker': 'param/marker', - 'limit': 'param/queue_limit', - 'detailed': 'param/detailed', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue': { - 'href-template': '/v2/queues/{queue_name}', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['GET', 'PUT', 'DELETE', 'PATCH'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue_stats': { - 'href-template': '/v2/queues/{queue_name}/stats', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/queue_share': { - 'href-template': '/v2/queues/{queue_name}/share', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'], - }, - }, - 'rel/queue_purge': { - 'href-template': '/v2/queues/{queue_name}/purge', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'], - }, - }, - - # ----------------------------------------------------------------- - # Messages - # ----------------------------------------------------------------- - 'rel/messages': { - 'href-template': ('/v2/queues/{queue_name}/messages' - '{?marker,limit,echo,include_claimed}'), - 'href-vars': { - 'queue_name': 'param/queue_name', - 'marker': 'param/marker', - 'limit': 
'param/messages_limit', - 'echo': 'param/echo', - 'include_claimed': 'param/include_claimed', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/post_messages': { - 'href-template': '/v2/queues/{queue_name}/messages', - 'href-vars': { - 'queue_name': 'param/queue_name', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'], - }, - }, - 'rel/messages_delete': { - 'href-template': '/v2/queues/{queue_name}/messages{?ids,pop}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'ids': 'param/ids', - 'pop': 'param/pop' - }, - 'hints': { - 'allow': [ - 'DELETE' - ], - 'formats': { - 'application/json': {} - } - } - }, - 'rel/message_delete': { - 'href-template': '/v2/queues/{queue_name}/messages/{message_id}{?claim}', # noqa - 'href-vars': { - 'queue_name': 'param/queue_name', - 'message_id': 'param/message_id', - 'claim': 'param/claim_id' - }, - 'hints': { - 'allow': [ - 'DELETE' - ], - 'formats': { - 'application/json': {} - } - } - }, - 'rel/message_get': { - 'href-template': '/v2/queues/{queue_name}/messages/{message_id}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'message_id': 'param/message_id' - }, - 'hints': { - 'allow': [ - 'GET' - ], - 'formats': { - 'application/json': {} - } - } - }, - - # ----------------------------------------------------------------- - # Claims - # ----------------------------------------------------------------- - 'rel/claim': { - 'href-template': '/v2/queues/{queue_name}/claims/{claim_id}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'claim_id': 'param/claim_id', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/post_claim': { - 'href-template': '/v2/queues/{queue_name}/claims{?limit}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'limit': 'param/claim_limit', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'] - }, - }, - 'rel/patch_claim': { - 'href-template': '/v2/queues/{queue_name}/claims/{claim_id}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'claim_id': 'param/claim_id', - }, - 'hints': { - 'allow': ['PATCH'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'] - }, - }, - 'rel/delete_claim': { - 'href-template': '/v2/queues/{queue_name}/claims/{claim_id}', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'claim_id': 'param/claim_id', - }, - 'hints': { - 'allow': ['DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - - # ----------------------------------------------------------------- - # Subscriptions - # ----------------------------------------------------------------- - 'rel/subscriptions_get': { - 'href-template': '/v2/queues/{queue_name}/subscriptions{?marker,limit}', # noqa - 'href-vars': { - 'queue_name': 'param/queue_name', - 'marker': 'param/marker', - 'limit': 'param/subscription_limit', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - } - } - }, - 'rel/subscriptions_post': { - 'href-template': '/v2/queues/{queue_name}/subscriptions', - 'href-vars': { - 'queue_name': 'param/queue_name', - 'limit': 'param/subscription_limit', - }, - 'hints': { - 'allow': ['POST'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'] - } - }, - 'rel/subscription': { - 'href-template': '/v2/queues/{queue_name}/subscriptions/{subscriptions_id}', # noqa - 
'href-vars': { - 'queue_name': 'param/queue_name', - 'subscriptions_id': 'param/subscriptions_id', - }, - 'hints': { - 'allow': ['GET', 'DELETE'], - 'formats': { - 'application/json': {}, - } - } - }, - 'rel/subscription_patch': { - 'href-template': '/v2/queues/{queue_name}/subscriptions/{subscriptions_id}', # noqa - 'href-vars': { - 'queue_name': 'param/queue_name', - 'subscriptions_id': 'param/subscriptions_id', - }, - 'hints': { - 'allow': ['PATCH'], - 'formats': { - 'application/json': {}, - }, - 'accept-post': ['application/json'] - } - }, - # ----------------------------------------------------------------- - # Ping - # ----------------------------------------------------------------- - 'rel/ping': { - 'href-template': '/v2/ping', - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - } - } - } - } -} - - -ADMIN_RESOURCES = { - # ----------------------------------------------------------------- - # Pools - # ----------------------------------------------------------------- - 'rel/pools': { - 'href-template': '/v2/pools{?detailed,limit,marker}', - 'href-vars': { - 'detailed': 'param/detailed', - 'limit': 'param/pool_limit', - 'marker': 'param/marker', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/pool': { - 'href-template': '/v2/pools/{pool_name}', - 'href-vars': { - 'pool_name': 'param/pool_name', - }, - 'hints': { - 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - - # ----------------------------------------------------------------- - # Flavors - # ----------------------------------------------------------------- - 'rel/flavors': { - 'href-template': '/v2/flavors{?detailed,limit,marker}', - 'href-vars': { - 'detailed': 'param/detailed', - 'limit': 'param/flavor_limit', - 'marker': 'param/marker', - }, - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, - 'rel/flavor': { - 'href-template': '/v2/flavors/{flavor_name}', - 'href-vars': { - 'flavor_name': 'param/flavor_name', - }, - 'hints': { - 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], - 'formats': { - 'application/json': {}, - }, - }, - }, - - # ----------------------------------------------------------------- - # Health - # ----------------------------------------------------------------- - 'rel/health': { - 'href': '/v2/health', - 'hints': { - 'allow': ['GET'], - 'formats': { - 'application/json': {}, - }, - }, - }, -} - - -class Resource(object): - - def __init__(self, conf): - if conf.admin_mode: - JSON_HOME['resources'].update(ADMIN_RESOURCES) - - document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4) - self.document_utf8 = document.encode('utf-8') - - def on_get(self, req, resp, project_id): - resp.data = self.document_utf8 - - resp.content_type = 'application/json-home' - resp.cache_control = ['max-age=86400'] - # status defaults to 200 diff --git a/zaqar/transport/wsgi/v2_0/messages.py b/zaqar/transport/wsgi/v2_0/messages.py deleted file mode 100644 index ca45e74b..00000000 --- a/zaqar/transport/wsgi/v2_0/messages.py +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
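Looping back to the home document served by the class that closes a few
lines above: clients discover endpoints by expanding its href-templates.
A deliberately naive sketch using only the standard library (a real
client should use a full RFC 6570 URI-template implementation)::

    # Sketch: expand simple {var} path templates and drop optional
    # query expansions such as '{?marker,limit}'.
    import re

    def expand(template, **values):
        template = re.sub(r'\{\?[^}]*\}', '', template)
        return re.sub(r'\{(\w+)\}', lambda m: values[m.group(1)], template)

    print(expand('/v2/queues/{queue_name}/messages{?marker,limit}',
                 queue_name='demo'))
    # -> /v2/queues/demo/messages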
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.common.transport.wsgi import helpers as wsgi_helpers -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import acl -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - - -class CollectionResource(object): - - __slots__ = ( - '_message_controller', - '_queue_controller', - '_wsgi_conf', - '_validate', - '_message_post_spec', - '_default_message_ttl' - ) - - def __init__(self, wsgi_conf, validate, - message_controller, queue_controller, - default_message_ttl): - - self._wsgi_conf = wsgi_conf - self._validate = validate - self._message_controller = message_controller - self._queue_controller = queue_controller - self._default_message_ttl = default_message_ttl - - self._message_post_spec = ( - ('ttl', int, self._default_message_ttl), - ('body', '*', None), - ) - - # ---------------------------------------------------------------------- - # Helpers - # ---------------------------------------------------------------------- - - def _get_by_id(self, base_path, project_id, queue_name, ids): - """Returns one or more messages from the queue by ID.""" - try: - self._validate.message_listing(limit=len(ids)) - messages = self._message_controller.bulk_get( - queue_name, - message_ids=ids, - project=project_id) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare response - messages = list(messages) - if not messages: - return None - - messages = [wsgi_utils.format_message_v1_1(m, base_path, m['claim_id']) - for m in messages] - - return {'messages': messages} - - def _get(self, req, project_id, queue_name): - client_uuid = wsgi_helpers.get_client_uuid(req) - kwargs = {} - - # NOTE(kgriffs): This syntax ensures that - # we don't clobber default values with None. 
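From the client's perspective, the marker handling implemented below is
what makes listings resumable; a sketch of following the 'next' links
(host, queue, and Client-ID are invented)::

    # Sketch: page through a queue's messages by following the
    # marker-based 'next' link attached to each page.
    import json
    import urllib.request

    base = 'http://localhost:8888'
    path = '/v2/queues/demo/messages?limit=5'
    headers = {'Client-ID': '3381af92-2b9e-11e3-b191-71861300734c'}

    while path:
        req = urllib.request.Request(base + path, headers=headers)
        with urllib.request.urlopen(req) as resp:
            if resp.status == 204:      # nothing (left) to read
                break
            page = json.loads(resp.read().decode('utf-8'))
        for msg in page.get('messages', []):
            print(msg['href'])
        links = page.get('links', [])
        path = links[0]['href'] if links and page.get('messages') else None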
- req.get_param('marker', store=kwargs) - req.get_param_as_int('limit', store=kwargs) - req.get_param_as_bool('echo', store=kwargs) - req.get_param_as_bool('include_claimed', store=kwargs) - - try: - self._validate.message_listing(**kwargs) - results = self._message_controller.list( - queue_name, - project=project_id, - client_uuid=client_uuid, - **kwargs) - - # Buffer messages - cursor = next(results) - messages = list(cursor) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.QueueDoesNotExist as ex: - LOG.debug(ex) - messages = None - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be listed.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - if not messages: - messages = [] - - else: - # Found some messages, so prepare the response - kwargs['marker'] = next(results) - base_path = req.path.rsplit('/', 1)[0] - messages = [wsgi_utils.format_message_v1_1(m, base_path, - m['claim_id']) - for m in messages] - - links = [] - if messages: - links = [ - { - 'rel': 'next', - 'href': req.path + falcon.to_query_str(kwargs) - } - ] - - return { - 'messages': messages, - 'links': links - } - - # ---------------------------------------------------------------------- - # Interface - # ---------------------------------------------------------------------- - - @decorators.TransportLog("Messages collection") - @acl.enforce("messages:create") - def on_post(self, req, resp, project_id, queue_name): - client_uuid = wsgi_helpers.get_client_uuid(req) - try: - # NOTE(flwang): Replace 'exists' with 'get_metadata' won't impact - # the performance since both of them will call - # collection.find_one() - queue_meta = None - try: - queue_meta = self._queue_controller.get_metadata(queue_name, - project_id) - except storage_errors.DoesNotExist as ex: - self._validate.queue_identification(queue_name, project_id) - self._queue_controller.create(queue_name, project=project_id) - # NOTE(flwang): Queue is created in lazy mode, so no metadata - # set. - queue_meta = {} - - queue_max_msg_size = queue_meta.get('_max_messages_post_size') - queue_default_ttl = queue_meta.get('_default_message_ttl') - - # TODO(flwang): To avoid any unexpected regression issue, we just - # leave the _message_post_spec attribute of class as it's. It - # should be removed in Newton release. 
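Reduced to its essentials, the branch below picks the queue's own TTL
default when one was set at queue-creation time and falls back to the
transport default otherwise (the values in the example are invented)::

    # Sketch: pick the effective default TTL for posted messages.
    def effective_post_spec(queue_meta, global_default_ttl):
        ttl = queue_meta.get('_default_message_ttl') or global_default_ttl
        return (('ttl', int, ttl), ('body', '*', None))

    print(effective_post_spec({'_default_message_ttl': 60}, 3600))
    print(effective_post_spec({}, 3600))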
- if queue_default_ttl: - message_post_spec = (('ttl', int, queue_default_ttl), - ('body', '*', None),) - else: - message_post_spec = (('ttl', int, self._default_message_ttl), - ('body', '*', None),) - # Place JSON size restriction before parsing - self._validate.message_length(req.content_length, - max_msg_post_size=queue_max_msg_size) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - # Deserialize and validate the incoming messages - document = wsgi_utils.deserialize(req.stream, req.content_length) - - if 'messages' not in document: - description = _(u'No messages were found in the request body.') - raise wsgi_errors.HTTPBadRequestAPI(description) - - messages = wsgi_utils.sanitize(document['messages'], - message_post_spec, - doctype=wsgi_utils.JSONArray) - - try: - self._validate.message_posting(messages) - - message_ids = self._message_controller.post( - queue_name, - messages=messages, - project=project_id, - client_uuid=client_uuid) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except storage_errors.MessageConflict as ex: - LOG.exception(ex) - description = _(u'No messages could be enqueued.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be enqueued.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare the response - ids_value = ','.join(message_ids) - resp.location = req.path + '?ids=' + ids_value - - hrefs = [req.path + '/' + id for id in message_ids] - body = {'resources': hrefs} - resp.body = utils.to_json(body) - resp.status = falcon.HTTP_201 - - @decorators.TransportLog("Messages collection") - @acl.enforce("messages:get_all") - def on_get(self, req, resp, project_id, queue_name): - ids = req.get_param_as_list('ids') - - if ids is None: - response = self._get(req, project_id, queue_name) - - else: - response = self._get_by_id(req.path.rsplit('/', 1)[0], project_id, - queue_name, ids) - - if response is None: - # NOTE(TheSriram): Trying to get a message by id, should - # return the message if its present, otherwise a 404 since - # the message might have been deleted. 
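In other words, a by-ID lookup that matches nothing is reported as a
404 rather than an empty list. On the wire (host, queue, and the two
message ids are invented)::

    # Sketch: fetch specific messages by ID; 404 means none matched.
    import urllib.error
    import urllib.request

    url = ('http://localhost:8888/v2/queues/demo/messages'
           '?ids=50b68a50d6f5b8c8a7c62b01,50b68a50d6f5b8c8a7c62b02')
    req = urllib.request.Request(
        url, headers={'Client-ID': '3381af92-2b9e-11e3-b191-71861300734c'})
    try:
        with urllib.request.urlopen(req) as resp:
            print(resp.read().decode('utf-8'))
    except urllib.error.HTTPError as err:
        print(err.code)  # 404 if none of the IDs exist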
- msg = _(u'No messages with IDs: {ids} found in the queue {queue} ' - u'for project {project}.') - description = msg.format(queue=queue_name, project=project_id, - ids=ids) - raise wsgi_errors.HTTPNotFound(description) - - else: - resp.body = utils.to_json(response) - # status defaults to 200 - - @decorators.TransportLog("Messages collection") - @acl.enforce("messages:delete_all") - def on_delete(self, req, resp, project_id, queue_name): - ids = req.get_param_as_list('ids') - pop_limit = req.get_param_as_int('pop') - try: - self._validate.message_deletion(ids, pop_limit) - - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - if ids: - resp.status = self._delete_messages_by_id(queue_name, ids, - project_id) - - elif pop_limit: - resp.status, resp.body = self._pop_messages(queue_name, - project_id, - pop_limit) - - def _delete_messages_by_id(self, queue_name, ids, project_id): - try: - self._message_controller.bulk_delete( - queue_name, - message_ids=ids, - project=project_id) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - return falcon.HTTP_204 - - def _pop_messages(self, queue_name, project_id, pop_limit): - try: - LOG.debug(u'POP messages - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - - messages = self._message_controller.pop( - queue_name, - project=project_id, - limit=pop_limit) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Messages could not be popped.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare response - if not messages: - messages = [] - body = {'messages': messages} - body = utils.to_json(body) - - return falcon.HTTP_200, body - - -class ItemResource(object): - - __slots__ = '_message_controller' - - def __init__(self, message_controller): - self._message_controller = message_controller - - @decorators.TransportLog("Messages item") - @acl.enforce("messages:get") - def on_get(self, req, resp, project_id, queue_name, message_id): - try: - message = self._message_controller.get( - queue_name, - message_id, - project=project_id) - - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Message could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - # Prepare response - message['href'] = req.path - message = wsgi_utils.format_message_v1_1(message, - req.path.rsplit('/', 2)[0], - message['claim_id']) - - resp.body = utils.to_json(message) - # status defaults to 200 - - @decorators.TransportLog("Messages item") - @acl.enforce("messages:delete") - def on_delete(self, req, resp, project_id, queue_name, message_id): - error_title = _(u'Unable to delete') - - try: - self._message_controller.delete( - queue_name, - message_id=message_id, - project=project_id, - claim=req.get_param('claim_id')) - - except storage_errors.MessageNotClaimed as ex: - LOG.debug(ex) - description = _(u'A claim was specified, but the message ' - u'is not currently claimed.') - raise falcon.HTTPBadRequest(error_title, description) - - except storage_errors.ClaimDoesNotExist as ex: - LOG.debug(ex) - description = _(u'The specified claim does not exist or ' - u'has expired.') - raise falcon.HTTPBadRequest(error_title, description) - - except storage_errors.NotPermitted as ex: - 
-            LOG.debug(ex)
-            description = _(u'This message is claimed; it cannot be '
-                            u'deleted without a valid claim ID.')
-            raise falcon.HTTPForbidden(error_title, description)
-
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Message could not be deleted.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
-
-        # All good
-        resp.status = falcon.HTTP_204
diff --git a/zaqar/transport/wsgi/v2_0/ping.py b/zaqar/transport/wsgi/v2_0/ping.py
deleted file mode 100644
index 1868e17b..00000000
--- a/zaqar/transport/wsgi/v2_0/ping.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2014 IBM Corp. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import falcon
-
-from zaqar.common import decorators
-from zaqar.transport import acl
-
-
-class Resource(object):
-
-    __slots__ = ('_driver',)
-
-    def __init__(self, driver):
-        self._driver = driver
-
-    @decorators.TransportLog("Ping item")
-    @acl.enforce("ping:get")
-    def on_get(self, req, resp, **kwargs):
-        resp.status = (falcon.HTTP_204 if self._driver.is_alive()
-                       else falcon.HTTP_503)
-
-    @decorators.TransportLog("Ping item")
-    @acl.enforce("ping:get")
-    def on_head(self, req, resp, **kwargs):
-        resp.status = falcon.HTTP_204
diff --git a/zaqar/transport/wsgi/v2_0/pools.py b/zaqar/transport/wsgi/v2_0/pools.py
deleted file mode 100644
index 9feba9f9..00000000
--- a/zaqar/transport/wsgi/v2_0/pools.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright (c) 2013 Rackspace Hosting, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""pools: a resource to handle storage pool management
-
-A pool is added by an operator by interacting with the
-pooling-related endpoints.
When specifying a pool, the -following fields are required: - -:: - - { - "name": string, - "weight": integer, - "uri": string::uri - } - -Furthermore, depending on the underlying storage type of pool being -registered, there is an optional field: -:: - - { - "options": {...} - } -""" - -import falcon -import jsonschema -from oslo_log import log -import six - -from zaqar.common.api.schemas import pools as schema -from zaqar.common import decorators -from zaqar.common import utils as common_utils -from zaqar.i18n import _ -from zaqar.storage import errors -from zaqar.storage import utils as storage_utils -from zaqar.transport import acl -from zaqar.transport import utils as transport_utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = log.getLogger(__name__) - - -class Listing(object): - """A resource to list registered pools - - :param pools_controller: means to interact with storage - """ - - def __init__(self, pools_controller, validate): - self._ctrl = pools_controller - self._validate = validate - - @decorators.TransportLog("Pools collection") - @acl.enforce("pools:get_all") - def on_get(self, request, response, project_id): - """Returns a pool listing as objects embedded in an object: - - :: - - { - "pools": [ - {"href": "", "weight": 100, "uri": ""}, - ... - ], - "links": [ - {"href": "", "rel": "next"} - ] - } - - :returns: HTTP | 200 - """ - - LOG.debug(u'LIST pools') - - store = {} - request.get_param('marker', store=store) - request.get_param_as_int('limit', store=store) - request.get_param_as_bool('detailed', store=store) - - try: - self._validate.pool_listing(**store) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - cursor = self._ctrl.list(**store) - pools = list(next(cursor)) - - results = {'links': []} - - if pools: - store['marker'] = next(cursor) - - for entry in pools: - entry['href'] = request.path + '/' + entry['name'] - - results['links'] = [ - { - 'rel': 'next', - 'href': request.path + falcon.to_query_str(store) - } - ] - - results['pools'] = pools - - response.content_location = request.relative_uri - response.body = transport_utils.to_json(results) - response.status = falcon.HTTP_200 - - -class Resource(object): - """A handler for individual pool. 
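Tying the module docstring above to a concrete request, registering a
pool supplies at least a weight and a storage URI (host, pool name, and
URI are invented; options is optional, as noted above)::

    # Sketch: register a storage pool with the admin API.
    import json
    import urllib.request

    body = json.dumps({'weight': 100,
                       'uri': 'mongodb://127.0.0.1:27017',
                       'options': {}}).encode('utf-8')
    req = urllib.request.Request(
        'http://localhost:8888/v2/pools/pool-a', data=body, method='PUT',
        headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as resp:
        print(resp.status)  # 201 on first registration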
- - :param pools_controller: means to interact with storage - """ - - def __init__(self, pools_controller): - self._ctrl = pools_controller - validator_type = jsonschema.Draft4Validator - self._validators = { - 'weight': validator_type(schema.patch_weight), - 'uri': validator_type(schema.patch_uri), - 'group': validator_type(schema.patch_uri), - 'options': validator_type(schema.patch_options), - 'create': validator_type(schema.create) - } - - @decorators.TransportLog("Pools item") - @acl.enforce("pools:get") - def on_get(self, request, response, project_id, pool): - """Returns a JSON object for a single pool entry: - - :: - - {"weight": 100, "uri": "", options: {...}} - - :returns: HTTP | [200, 404] - """ - - LOG.debug(u'GET pool - name: %s', pool) - data = None - detailed = request.get_param_as_bool('detailed') or False - - try: - data = self._ctrl.get(pool, detailed) - - except errors.PoolDoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - data['href'] = request.path - - response.body = transport_utils.to_json(data) - - @decorators.TransportLog("Pools item") - @acl.enforce("pools:create") - def on_put(self, request, response, project_id, pool): - """Registers a new pool. Expects the following input: - - :: - - {"weight": 100, "uri": ""} - - An options object may also be provided. - - :returns: HTTP | [201, 204] - """ - - LOG.debug(u'PUT pool - name: %s', pool) - - conf = self._ctrl.driver.conf - data = wsgi_utils.load(request) - wsgi_utils.validate(self._validators['create'], data) - if not storage_utils.can_connect(data['uri'], conf=conf): - raise wsgi_errors.HTTPBadRequestBody( - 'cannot connect to %s' % data['uri'] - ) - try: - self._ctrl.create(pool, weight=data['weight'], - uri=data['uri'], - group=data.get('group'), - options=data.get('options', {})) - response.status = falcon.HTTP_201 - response.location = request.path - except errors.PoolCapabilitiesMismatch as e: - LOG.exception(e) - title = _(u'Unable to create pool') - raise falcon.HTTPBadRequest(title, six.text_type(e)) - except errors.PoolAlreadyExists as e: - LOG.exception(e) - raise wsgi_errors.HTTPConflict(six.text_type(e)) - - @decorators.TransportLog("Pools item") - @acl.enforce("pools:delete") - def on_delete(self, request, response, project_id, pool): - """Deregisters a pool. - - :returns: HTTP | [204, 403] - """ - - LOG.debug(u'DELETE pool - name: %s', pool) - - try: - self._ctrl.delete(pool) - except errors.PoolInUseByFlavor as ex: - LOG.exception(ex) - title = _(u'Unable to delete') - description = _(u'This pool is used by flavors {flavor}; ' - u'It cannot be deleted.') - description = description.format(flavor=ex.flavor) - raise falcon.HTTPForbidden(title, description) - - response.status = falcon.HTTP_204 - - @decorators.TransportLog("Pools item") - @acl.enforce("pools:update") - def on_patch(self, request, response, project_id, pool): - """Allows one to update a pool's weight, uri, and/or options. - - This method expects the user to submit a JSON object - containing at least one of: 'uri', 'weight', 'group', 'options'. If - none are found, the request is flagged as bad. There is also - strict format checking through the use of - jsonschema. Appropriate errors are returned in each case for - badly formatted input. 
- - :returns: HTTP | 200,400 - """ - - LOG.debug(u'PATCH pool - name: %s', pool) - data = wsgi_utils.load(request) - - EXPECT = ('weight', 'uri', 'group', 'options') - if not any([(field in data) for field in EXPECT]): - LOG.debug(u'PATCH pool, bad params') - raise wsgi_errors.HTTPBadRequestBody( - 'One of `uri`, `weight`, `group`, or `options` needs ' - 'to be specified' - ) - - for field in EXPECT: - wsgi_utils.validate(self._validators[field], data) - - conf = self._ctrl.driver.conf - if 'uri' in data and not storage_utils.can_connect(data['uri'], - conf=conf): - raise wsgi_errors.HTTPBadRequestBody( - 'cannot connect to %s' % data['uri'] - ) - fields = common_utils.fields(data, EXPECT, - pred=lambda v: v is not None) - resp_data = None - try: - self._ctrl.update(pool, **fields) - resp_data = self._ctrl.get(pool, False) - except errors.PoolDoesNotExist as ex: - LOG.exception(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - resp_data['href'] = request.path - response.body = transport_utils.to_json(resp_data) diff --git a/zaqar/transport/wsgi/v2_0/purge.py b/zaqar/transport/wsgi/v2_0/purge.py deleted file mode 100644 index 76283bfd..00000000 --- a/zaqar/transport/wsgi/v2_0/purge.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2016 Catalyst IT Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. 
- -import falcon - -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.transport import acl -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - - -class Resource(object): - - __slots__ = ('_driver', '_conf', '_queue_ctrl', - '_message_ctrl', '_subscription_ctrl', '_validate') - - def __init__(self, driver): - self._driver = driver - self._conf = driver._conf - self._queue_ctrl = driver._storage.queue_controller - self._message_ctrl = driver._storage.message_controller - self._subscription_ctrl = driver._storage.subscription_controller - self._validate = driver._validate - - @decorators.TransportLog("Queues item") - @acl.enforce("queues:purge") - def on_post(self, req, resp, project_id, queue_name): - try: - if req.content_length: - document = wsgi_utils.deserialize(req.stream, - req.content_length) - self._validate.queue_purging(document) - else: - document = {'resource_types': ['messages', 'subscriptions']} - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - try: - if "messages" in document['resource_types']: - pop_limit = 100 - LOG.debug("Purge all messages under queue %s", queue_name) - messages = self._message_ctrl.pop(queue_name, pop_limit, - project=project_id) - while messages: - messages = self._message_ctrl.pop(queue_name, pop_limit, - project=project_id) - - if "subscriptions" in document['resource_types']: - LOG.debug("Purge all subscriptions under queue %s", queue_name) - results = self._subscription_ctrl.list(queue_name, - project=project_id) - subscriptions = list(next(results)) - for sub in subscriptions: - self._subscription_ctrl.delete(queue_name, - sub['id'], - project=project_id) - except ValueError as err: - raise wsgi_errors.HTTPBadRequestAPI(str(err)) - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue could not be purged.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_204 diff --git a/zaqar/transport/wsgi/v2_0/queues.py b/zaqar/transport/wsgi/v2_0/queues.py deleted file mode 100644 index b48710ac..00000000 --- a/zaqar/transport/wsgi/v2_0/queues.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright (c) 2013 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
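Before moving on to the queues module: the purge resource deleted just
above accepts an optional body naming which resource types to drop, and
purges both messages and subscriptions when the body is omitted. On the
wire (host and queue are invented)::

    # Sketch: purge only a queue's messages, keeping subscriptions.
    import json
    import urllib.request

    body = json.dumps({'resource_types': ['messages']}).encode('utf-8')
    req = urllib.request.Request(
        'http://localhost:8888/v2/queues/demo/purge', data=body,
        method='POST', headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as resp:
        print(resp.status)  # 204 when the purge succeeds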
- -import falcon -from oslo_log import log as logging -import six - -from zaqar.common import decorators -from zaqar.i18n import _ -from zaqar.storage import errors as storage_errors -from zaqar.transport import acl -from zaqar.transport import utils -from zaqar.transport import validation -from zaqar.transport.wsgi import errors as wsgi_errors -from zaqar.transport.wsgi import utils as wsgi_utils - -LOG = logging.getLogger(__name__) - - -def _get_reserved_metadata(validate): - _reserved_metadata = ['max_messages_post_size', 'default_message_ttl'] - reserved_metadata = { - '_%s' % meta: - validate.get_limit_conf_value(meta) - for meta in _reserved_metadata - } - return reserved_metadata - - -class ItemResource(object): - - __slots__ = ('_validate', '_queue_controller', '_message_controller', - '_reserved_metadata') - - def __init__(self, validate, queue_controller, message_controller): - self._validate = validate - self._queue_controller = queue_controller - self._message_controller = message_controller - - @decorators.TransportLog("Queues item") - @acl.enforce("queues:get") - def on_get(self, req, resp, project_id, queue_name): - try: - resp_dict = self._queue_controller.get(queue_name, - project=project_id) - for meta, value in _get_reserved_metadata(self._validate).items(): - if not resp_dict.get(meta): - resp_dict[meta] = value - except storage_errors.DoesNotExist as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPNotFound(six.text_type(ex)) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue metadata could not be retrieved.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.body = utils.to_json(resp_dict) - # status defaults to 200 - - @decorators.TransportLog("Queues item") - @acl.enforce("queues:create") - def on_put(self, req, resp, project_id, queue_name): - try: - # Place JSON size restriction before parsing - self._validate.queue_metadata_length(req.content_length) - # Deserialize queue metadata - metadata = None - if req.content_length: - document = wsgi_utils.deserialize(req.stream, - req.content_length) - metadata = wsgi_utils.sanitize(document) - self._validate.queue_metadata_putting(metadata) - except validation.ValidationFailed as ex: - LOG.debug(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - - try: - created = self._queue_controller.create(queue_name, - metadata=metadata, - project=project_id) - - except storage_errors.FlavorDoesNotExist as ex: - LOG.exception(ex) - raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex)) - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue could not be created.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_201 if created else falcon.HTTP_204 - resp.location = req.path - - @decorators.TransportLog("Queues item") - @acl.enforce("queues:delete") - def on_delete(self, req, resp, project_id, queue_name): - LOG.debug(u'Queue item DELETE - queue: %(queue)s, ' - u'project: %(project)s', - {'queue': queue_name, 'project': project_id}) - try: - self._queue_controller.delete(queue_name, project=project_id) - - except Exception as ex: - LOG.exception(ex) - description = _(u'Queue could not be deleted.') - raise wsgi_errors.HTTPServiceUnavailable(description) - - resp.status = falcon.HTTP_204 - - @decorators.TransportLog("Queues item") - @acl.enforce("queues:update") - def on_patch(self, req, resp, project_id, queue_name): - """Allows one to update a queue's metadata. - - This method expects the user to submit a JSON object. 
-        strict format checking through the use of jsonschema. Appropriate
-        errors are returned in each case for badly formatted input.
-
-        :returns: HTTP | 200,400,409,503
-        """
-        LOG.debug(u'PATCH queue - name: %s', queue_name)
-
-        try:
-            # Place JSON size restriction before parsing
-            self._validate.queue_metadata_length(req.content_length)
-        except validation.ValidationFailed as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestBody(six.text_type(ex))
-
-        # NOTE(flwang): See below link to get more details about draft 10,
-        # tools.ietf.org/html/draft-ietf-appsawg-json-patch-10
-        content_types = {
-            'application/openstack-messaging-v2.0-json-patch': 10,
-        }
-
-        if req.content_type not in content_types:
-            headers = {'Accept-Patch':
-                       ', '.join(sorted(content_types.keys()))}
-            msg = _("Accepted media type for PATCH: %s.")
-            LOG.debug(msg, headers)
-            raise wsgi_errors.HTTPUnsupportedMediaType(msg % headers)
-
-        if req.content_length:
-            try:
-                changes = utils.read_json(req.stream, req.content_length)
-                changes = wsgi_utils.sanitize(changes, doctype=list)
-            except utils.MalformedJSON as ex:
-                LOG.debug(ex)
-                description = _(u'Request body could not be parsed.')
-                raise wsgi_errors.HTTPBadRequestBody(description)
-
-            except utils.OverflowedJSONInteger as ex:
-                LOG.debug(ex)
-                description = _(u'JSON contains integer that is too large.')
-                raise wsgi_errors.HTTPBadRequestBody(description)
-
-            except Exception as ex:
-                # Error while reading from the network/server
-                LOG.exception(ex)
-                description = _(u'Request body could not be read.')
-                raise wsgi_errors.HTTPServiceUnavailable(description)
-        else:
-            msg = _("PATCH body cannot be empty for update.")
-            LOG.debug(msg)
-            raise wsgi_errors.HTTPBadRequestBody(msg)
-
-        try:
-            changes = self._validate.queue_patching(req, changes)
-
-            # NOTE(Eva-i): using 'get_metadata' instead of 'get', so
-            # QueueDoesNotExist error will be thrown in case of non-existent
-            # queue.
-            metadata = self._queue_controller.get_metadata(queue_name,
-                                                           project=project_id)
-            reserved_metadata = _get_reserved_metadata(self._validate)
-            for change in changes:
-                change_method_name = '_do_%s' % change['op']
-                change_method = getattr(self, change_method_name)
-                change_method(req, metadata, reserved_metadata, change)
-
-            self._validate.queue_metadata_putting(metadata)
-
-            self._queue_controller.set_metadata(queue_name,
-                                                metadata,
-                                                project_id)
-        except storage_errors.DoesNotExist as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPNotFound(six.text_type(ex))
-        except validation.ValidationFailed as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestBody(six.text_type(ex))
-        except wsgi_errors.HTTPConflict as ex:
-            raise ex
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Queue could not be updated.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
-        for meta, value in _get_reserved_metadata(self._validate).items():
-            if not metadata.get(meta):
-                metadata[meta] = value
-        resp.body = utils.to_json(metadata)
-
-    def _do_replace(self, req, metadata, reserved_metadata, change):
-        path = change['path']
-        path_child = path[1]
-        value = change['value']
-        if path_child in metadata or path_child in reserved_metadata:
-            metadata[path_child] = value
-        else:
-            msg = _("Can't replace non-existent object %s.")
-            raise wsgi_errors.HTTPConflict(msg % path_child)
-
-    def _do_add(self, req, metadata, reserved_metadata, change):
-        path = change['path']
-        path_child = path[1]
-        value = change['value']
-        metadata[path_child] = value
-
-    def _do_remove(self, req, metadata, reserved_metadata, change):
-        path = change['path']
-        path_child = path[1]
-        if path_child in metadata:
-            metadata.pop(path_child)
-        elif path_child not in reserved_metadata:
-            msg = _("Can't remove non-existent object %s.")
-            raise wsgi_errors.HTTPConflict(msg % path_child)
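
The on_patch handler above only accepts the application/openstack-messaging-v2.0-json-patch media type and a JSON list of add/replace/remove operations, each dispatched to the matching _do_* method. A request sketch under the same hypothetical endpoint and credentials as before:

    import json
    import uuid

    import requests

    url = 'http://localhost:8888/v2/queues/demo'  # hypothetical endpoint
    headers = {
        'Client-ID': str(uuid.uuid4()),
        'X-Auth-Token': 'PLACEHOLDER',
        # The only media type accepted by the handler's content_types map.
        'Content-Type': 'application/openstack-messaging-v2.0-json-patch',
    }
    # One entry per supported op; paths address keys under /metadata.
    changes = [
        {'op': 'replace', 'path': '/metadata/_default_message_ttl',
         'value': 7200},
        {'op': 'add', 'path': '/metadata/description', 'value': 'demo queue'},
    ]

    resp = requests.patch(url, headers=headers, data=json.dumps(changes))
    print(resp.status_code, resp.json())  # updated metadata is echoed back
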
-
-
-class CollectionResource(object):
-
-    __slots__ = ('_queue_controller', '_validate', '_reserved_metadata')
-
-    def __init__(self, validate, queue_controller):
-        self._queue_controller = queue_controller
-        self._validate = validate
-
-    @decorators.TransportLog("Queues collection")
-    @acl.enforce("queues:get_all")
-    def on_get(self, req, resp, project_id):
-        kwargs = {}
-
-        # NOTE(kgriffs): This syntax ensures that
-        # we don't clobber default values with None.
-        req.get_param('marker', store=kwargs)
-        req.get_param_as_int('limit', store=kwargs)
-        req.get_param_as_bool('detailed', store=kwargs)
-
-        try:
-            self._validate.queue_listing(**kwargs)
-            results = self._queue_controller.list(project=project_id, **kwargs)
-
-            # Buffer list of queues
-            queues = list(next(results))
-
-        except validation.ValidationFailed as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
-
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Queues could not be listed.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
-
-        # Got some. Prepare the response.
-        kwargs['marker'] = next(results) or kwargs.get('marker', '')
-        reserved_metadata = _get_reserved_metadata(self._validate).items()
-        for each_queue in queues:
-            each_queue['href'] = req.path + '/' + each_queue['name']
-            if kwargs.get('detailed'):
-                for meta, value in reserved_metadata:
-                    if not each_queue.get('metadata', {}).get(meta):
-                        each_queue['metadata'][meta] = value
-
-        links = []
-        if queues:
-            links = [
-                {
-                    'rel': 'next',
-                    'href': req.path + falcon.to_query_str(kwargs)
-                }
-            ]
-
-        response_body = {
-            'queues': queues,
-            'links': links
-        }
-
-        resp.body = utils.to_json(response_body)
-        # status defaults to 200
diff --git a/zaqar/transport/wsgi/v2_0/stats.py b/zaqar/transport/wsgi/v2_0/stats.py
deleted file mode 100644
index 2258ee8c..00000000
--- a/zaqar/transport/wsgi/v2_0/stats.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2013 Rackspace, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_log import log as logging
-import six
-
-from zaqar.common import decorators
-from zaqar.i18n import _
-from zaqar.storage import errors as storage_errors
-from zaqar.transport import acl
-from zaqar.transport import utils
-from zaqar.transport.wsgi import errors as wsgi_errors
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Resource(object):
-
-    __slots__ = '_queue_ctrl'
-
-    def __init__(self, queue_controller):
-        self._queue_ctrl = queue_controller
-
-    @decorators.TransportLog("Queues stats item")
-    @acl.enforce("queues:stats")
-    def on_get(self, req, resp, project_id, queue_name):
-        try:
-            resp_dict = self._queue_ctrl.stats(queue_name,
-                                               project=project_id)
-
-            message_stats = resp_dict['messages']
-
-            if message_stats['total'] != 0:
-                base_path = req.path[:req.path.rindex('/')] + '/messages/'
-
-                newest = message_stats['newest']
-                newest['href'] = base_path + newest['id']
-                del newest['id']
-
-                oldest = message_stats['oldest']
-                oldest['href'] = base_path + oldest['id']
-                del oldest['id']
-
-            resp.body = utils.to_json(resp_dict)
-            # status defaults to 200
-
-        except (storage_errors.QueueDoesNotExist,
-                storage_errors.QueueIsEmpty) as ex:
-            resp_dict = {
-                'messages': {
-                    'claimed': 0,
-                    'free': 0,
-                    'total': 0
-                }
-            }
-            resp.body = utils.to_json(resp_dict)
-
-        except storage_errors.DoesNotExist as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPNotFound(six.text_type(ex))
-
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Queue stats could not be read.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
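
Note that for missing or empty queues the stats handler degrades to an all-zero messages document rather than returning 404, and that newest/oldest carry hrefs instead of raw ids. A client-side sketch, under the same placeholder endpoint and token assumptions as earlier:

    import uuid

    import requests

    url = 'http://localhost:8888/v2/queues/demo/stats'  # hypothetical
    headers = {'Client-ID': str(uuid.uuid4()), 'X-Auth-Token': 'PLACEHOLDER'}

    stats = requests.get(url, headers=headers).json()['messages']
    # 'claimed', 'free', and 'total' are always present; 'newest' and
    # 'oldest' (rewritten to hrefs by on_get above) appear only when
    # total != 0.
    print(stats['total'], stats['claimed'], stats['free'])
    if stats['total']:
        print('oldest message:', stats['oldest']['href'])
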
diff --git a/zaqar/transport/wsgi/v2_0/subscriptions.py b/zaqar/transport/wsgi/v2_0/subscriptions.py
deleted file mode 100644
index a0a7f146..00000000
--- a/zaqar/transport/wsgi/v2_0/subscriptions.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright (c) 2015 Catalyst IT Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import datetime
-
-import falcon
-from oslo_log import log as logging
-from oslo_utils import netutils
-from oslo_utils import timeutils
-import six
-from stevedore import driver
-
-from zaqar.common import decorators
-from zaqar.i18n import _
-from zaqar.notification import notifier
-from zaqar.storage import errors as storage_errors
-from zaqar.transport import acl
-from zaqar.transport import utils
-from zaqar.transport import validation
-from zaqar.transport.wsgi import errors as wsgi_errors
-from zaqar.transport.wsgi import utils as wsgi_utils
-
-
-LOG = logging.getLogger(__name__)
-
-
-class ItemResource(object):
-
-    __slots__ = ('_validate', '_subscription_controller')
-
-    def __init__(self, validate, subscription_controller):
-        self._validate = validate
-        self._subscription_controller = subscription_controller
-
-    @decorators.TransportLog("Subscriptions item")
-    @acl.enforce("subscription:get")
-    def on_get(self, req, resp, project_id, queue_name, subscription_id):
-        try:
-            resp_dict = self._subscription_controller.get(queue_name,
-                                                          subscription_id,
-                                                          project=project_id)
-
-        except storage_errors.DoesNotExist as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPNotFound(six.text_type(ex))
-
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Subscription could not be retrieved.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
-
-        resp.body = utils.to_json(resp_dict)
-        # status defaults to 200
-
-    @decorators.TransportLog("Subscriptions item")
-    @acl.enforce("subscription:delete")
-    def on_delete(self, req, resp, project_id, queue_name, subscription_id):
-        try:
-            self._subscription_controller.delete(queue_name,
-                                                 subscription_id,
-                                                 project=project_id)
-
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Subscription could not be deleted.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
-
-        resp.status = falcon.HTTP_204
-
-    @decorators.TransportLog("Subscriptions item")
-    @acl.enforce("subscription:update")
-    def on_patch(self, req, resp, project_id, queue_name, subscription_id):
-        if req.content_length:
-            document = wsgi_utils.deserialize(req.stream, req.content_length)
-        else:
-            document = {}
-
-        try:
-            self._validate.subscription_patching(document)
-            self._subscription_controller.update(queue_name, subscription_id,
-                                                 project=project_id,
-                                                 **document)
-            resp.status = falcon.HTTP_204
-            resp.location = req.path
-        except storage_errors.SubscriptionDoesNotExist as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPNotFound(six.text_type(ex))
-        except storage_errors.SubscriptionAlreadyExists as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPConflict(six.text_type(ex))
-        except validation.ValidationFailed as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
-        except Exception as ex:
-            LOG.exception(ex)
-            description = (_(u'Subscription %(subscription_id)s could not be'
-                             ' updated.') %
-                           dict(subscription_id=subscription_id))
-            raise falcon.HTTPBadRequest(_('Unable to update subscription'),
-                                        description)
-
-
-class CollectionResource(object):
-
-    __slots__ = ('_subscription_controller', '_validate',
-                 '_default_subscription_ttl', '_queue_controller',
-                 '_conf', '_notification')
-
-    def __init__(self, validate, subscription_controller,
-                 default_subscription_ttl, queue_controller, conf):
-        self._subscription_controller = subscription_controller
-        self._validate = validate
-        self._default_subscription_ttl = default_subscription_ttl
-        self._queue_controller = queue_controller
-        self._conf = conf
-        self._notification = notifier.NotifierDriver()
-
-    @decorators.TransportLog("Subscriptions collection")
-    @acl.enforce("subscription:get_all")
-    def on_get(self, req, resp, project_id, queue_name):
-        kwargs = {}
-
-        # NOTE(kgriffs): This syntax ensures that
-        # we don't clobber default values with None.
-        req.get_param('marker', store=kwargs)
-        req.get_param_as_int('limit', store=kwargs)
-
-        try:
-            self._validate.subscription_listing(**kwargs)
-            results = self._subscription_controller.list(queue_name,
                                                          project=project_id,
-                                                         **kwargs)
-            # Buffer list of subscriptions. Can raise NoPoolFound error.
-            subscriptions = list(next(results))
-        except validation.ValidationFailed as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
-
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Subscriptions could not be listed.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
-
-        # Got some. Prepare the response.
-        kwargs['marker'] = next(results) or kwargs.get('marker', '')
-
-        links = []
-        if subscriptions:
-            links = [
-                {
-                    'rel': 'next',
-                    'href': req.path + falcon.to_query_str(kwargs)
-                }
-            ]
-        response_body = {
-            'subscriptions': subscriptions,
-            'links': links
-        }
-
-        resp.body = utils.to_json(response_body)
-        # status defaults to 200
-
-    @decorators.TransportLog("Subscriptions collection")
-    @acl.enforce("subscription:create")
-    def on_post(self, req, resp, project_id, queue_name):
-        if req.content_length:
-            document = wsgi_utils.deserialize(req.stream, req.content_length)
-        else:
-            document = {}
-
-        try:
-            if not self._queue_controller.exists(queue_name, project_id):
-                self._queue_controller.create(queue_name, project=project_id)
-            self._validate.subscription_posting(document)
-            subscriber = document['subscriber']
-            options = document.get('options', {})
-            url = netutils.urlsplit(subscriber)
-            ttl = document.get('ttl', self._default_subscription_ttl)
-            mgr = driver.DriverManager('zaqar.notification.tasks', url.scheme,
-                                       invoke_on_load=True)
-            req_data = req.headers.copy()
-            req_data.update(req.env)
-            mgr.driver.register(subscriber, options, ttl, project_id, req_data)
-
-            created = self._subscription_controller.create(queue_name,
-                                                           subscriber,
-                                                           ttl,
-                                                           options,
-                                                           project=project_id)
-        except validation.ValidationFailed as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
-        except Exception as ex:
-            LOG.exception(ex)
-            description = _(u'Subscription could not be created.')
-            raise wsgi_errors.HTTPServiceUnavailable(description)
-
-        now = timeutils.utcnow_ts()
-        now_dt = datetime.datetime.utcfromtimestamp(now)
-        expires = now_dt + datetime.timedelta(seconds=ttl)
-        api_version = req.path.split('/')[1]
-        if created:
-            subscription = self._subscription_controller.get(queue_name,
-                                                             created,
-                                                             project_id)
-            # send confirm notification
-            self._notification.send_confirm_notification(
-                queue_name, subscription, self._conf, project_id,
-                str(expires), api_version)
-
-            resp.location = req.path
-            resp.status = falcon.HTTP_201
-            resp.body = utils.to_json(
-                {'subscription_id': six.text_type(created)})
-        else:
-            subscription = self._subscription_controller.get_with_subscriber(
-                queue_name, subscriber, project_id)
-            confirmed = subscription.get('confirmed', True)
-            if confirmed:
-                description = _(u'Such subscription already exists. '
-                                u'Subscriptions are unique by project + queue '
-                                u'+ subscriber URI.')
-                raise wsgi_errors.HTTPConflict(description,
-                                               headers={'location': req.path})
-            else:
-                # The subscription is not confirmed, re-send confirm
-                # notification
-                self._notification.send_confirm_notification(
-                    queue_name, subscription, self._conf, project_id,
-                    str(expires), api_version)
-
-                resp.location = req.path
-                resp.status = falcon.HTTP_201
-                resp.body = utils.to_json(
-                    {'subscription_id': six.text_type(subscription['id'])})
-
-
-class ConfirmResource(object):
-
-    __slots__ = ('_subscription_controller', '_validate', '_notification',
-                 '_conf')
-
-    def __init__(self, validate, subscription_controller, conf):
-        self._subscription_controller = subscription_controller
-        self._validate = validate
-        self._notification = notifier.NotifierDriver()
-        self._conf = conf
-
-    @decorators.TransportLog("Subscriptions confirmation item")
-    @acl.enforce("subscription:confirm")
-    def on_put(self, req, resp, project_id, queue_name, subscription_id):
-        if req.content_length:
-            document = wsgi_utils.deserialize(req.stream, req.content_length)
-        else:
-            document = {}
-
-        try:
-            self._validate.subscription_confirming(document)
-            confirmed = document.get('confirmed')
-            self._subscription_controller.confirm(queue_name, subscription_id,
-                                                  project=project_id,
-                                                  confirmed=confirmed)
-            if confirmed is False:
-                now = timeutils.utcnow_ts()
-                now_dt = datetime.datetime.utcfromtimestamp(now)
-                ttl = self._conf.transport.default_subscription_ttl
-                expires = now_dt + datetime.timedelta(seconds=ttl)
-                api_version = req.path.split('/')[1]
-                sub = self._subscription_controller.get(queue_name,
-                                                        subscription_id,
-                                                        project=project_id)
-                self._notification.send_confirm_notification(queue_name,
-                                                             sub,
-                                                             self._conf,
-                                                             project_id,
-                                                             str(expires),
-                                                             api_version,
-                                                             True)
-            resp.status = falcon.HTTP_204
-            resp.location = req.path
-        except storage_errors.SubscriptionDoesNotExist as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPNotFound(six.text_type(ex))
-        except validation.ValidationFailed as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
-        except Exception as ex:
-            LOG.exception(ex)
-            description = (_(u'Subscription %(subscription_id)s could not be'
-                             ' confirmed.') %
-                           dict(subscription_id=subscription_id))
-            raise falcon.HTTPBadRequest(_('Unable to confirm subscription'),
-                                        description)
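
ConfirmResource above flips a subscription's confirmed flag via PUT and re-sends the confirmation notification when a subscription is unconfirmed. A sketch of the matching request; the /confirm suffix follows the v2 API layout, with endpoint, token, and subscription id as placeholders:

    import json
    import uuid

    import requests

    # Placeholder endpoint and subscription id (as returned at creation).
    url = ('http://localhost:8888/v2/queues/demo/subscriptions/'
           'SUBSCRIPTION_ID/confirm')
    headers = {
        'Client-ID': str(uuid.uuid4()),
        'X-Auth-Token': 'PLACEHOLDER',
        'Content-Type': 'application/json',
    }

    # The handler reads a boolean 'confirmed' field out of the JSON body.
    resp = requests.put(url, headers=headers,
                        data=json.dumps({'confirmed': True}))
    assert resp.status_code == 204
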
diff --git a/zaqar/transport/wsgi/v2_0/urls.py b/zaqar/transport/wsgi/v2_0/urls.py
deleted file mode 100644
index ed5a4c19..00000000
--- a/zaqar/transport/wsgi/v2_0/urls.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import os
-
-from oslo_log import log as logging
-import six
-
-from zaqar.common import decorators
-from zaqar.common import urls
-from zaqar.transport import acl
-from zaqar.transport import utils
-from zaqar.transport.wsgi import errors as wsgi_errors
-from zaqar.transport.wsgi import utils as wsgi_utils
-
-LOG = logging.getLogger(__name__)
-
-_KNOWN_KEYS = {'methods', 'expires', 'paths'}
-
-_VALID_PATHS = {'messages', 'subscriptions', 'claims'}
-
-
-class Resource(object):
-
-    __slots__ = ('_driver', '_conf')
-
-    def __init__(self, driver):
-        self._driver = driver
-        self._conf = driver._conf
-
-    @decorators.TransportLog("Queues share item")
-    @acl.enforce("queues:share")
-    def on_post(self, req, resp, project_id, queue_name):
-        LOG.debug(u'Pre-Signed URL Creation for queue: %(queue)s, '
-                  u'project: %(project)s',
-                  {'queue': queue_name, 'project': project_id})
-
-        try:
-            document = wsgi_utils.deserialize(req.stream, req.content_length)
-        except ValueError as ex:
-            LOG.debug(ex)
-            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
-
-        diff = set(document.keys()) - _KNOWN_KEYS
-        if diff:
-            msg = six.text_type('Unknown keys: %s' % diff)
-            raise wsgi_errors.HTTPBadRequestAPI(msg)
-
-        key = self._conf.signed_url.secret_key
-        paths = document.pop('paths', None)
-        if not paths:
-            paths = [os.path.join(req.path[:-6], 'messages')]
-        else:
-            diff = set(paths) - _VALID_PATHS
-            if diff:
-                msg = six.text_type('Invalid paths: %s' % diff)
-                raise wsgi_errors.HTTPBadRequestAPI(msg)
-            paths = [os.path.join(req.path[:-6], path) for path in paths]
-
-        try:
-            data = urls.create_signed_url(key, paths,
-                                          project=project_id,
-                                          **document)
-        except ValueError as err:
-            raise wsgi_errors.HTTPBadRequestAPI(str(err))
-
-        resp.body = utils.to_json(data)
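
Since the share resource above derives signed paths from req.path with the trailing '/share' (six characters) stripped, it is reachable at .../queues/{queue}/share. A request sketch, again with placeholder endpoint and token; only the keys in _KNOWN_KEYS are allowed, and paths must come from _VALID_PATHS:

    import json
    import uuid

    import requests

    url = 'http://localhost:8888/v2/queues/demo/share'  # hypothetical
    headers = {
        'Client-ID': str(uuid.uuid4()),
        'X-Auth-Token': 'PLACEHOLDER',
        'Content-Type': 'application/json',
    }
    body = {
        'methods': ['GET', 'POST'],
        'paths': ['messages'],             # subset of _VALID_PATHS
        'expires': '2017-12-31T00:00:00',  # illustrative expiry
    }

    resp = requests.post(url, headers=headers, data=json.dumps(body))
    print(resp.json())  # signature, expanded paths, and expiry on success
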
diff --git a/zaqar/transport/wsgi/version.py b/zaqar/transport/wsgi/version.py
deleted file mode 100644
index 0a8792d6..00000000
--- a/zaqar/transport/wsgi/version.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import falcon
-
-from zaqar.transport import utils
-from zaqar.transport.wsgi import v1_0
-from zaqar.transport.wsgi import v1_1
-from zaqar.transport.wsgi import v2_0
-
-VERSIONS = {
-    'versions': [
-        v1_0.VERSION,
-        v1_1.VERSION,
-        v2_0.VERSION
-    ]
-}
-
-
-class Resource(object):
-
-    def __init__(self):
-        self.versions = utils.to_json(VERSIONS)
-
-    def on_get(self, req, resp, project_id):
-        resp.data = self.versions
-
-        resp.status = falcon.HTTP_300
diff --git a/zaqar/version.py b/zaqar/version.py
deleted file mode 100644
index 352b2f22..00000000
--- a/zaqar/version.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import pbr.version
-
-
-version_info = pbr.version.VersionInfo('zaqar')
-version_string = version_info.version_string
-
-
-def verify_sha(expected):
-    """Verifies the commit hash for an interim Zaqar build.
-
-    This function may be used to verify that the version of the zaqar
-    package, as imported from an environment's site-packages, is the
-    expected build. This allows continuous integration scripts to
-    detect out-of-date installations of the package.
-
-    Note that this function will ALWAYS return False for Zaqar packages
-    that were not installed from git.
-
-    :param expected: The expected commit object name. May be either a full
-        or abbreviated SHA hash. If abbreviated, at least 7 digits are
-        required.
-    :returns: True if the package's version string contains a hash, and
-        that hash matches `expected`. Otherwise returns False.
-    """
-
-    # NOTE(kgriffs): Require 7 digits to avoid false positives. In practice,
-    # Git's abbreviated commit object names will always include at least
-    # 7 digits.
-    assert len(expected) >= 7
-
-    # NOTE(kgriffs): Git usually abbreviates hashes to 7 digits, but also
-    # check 8 digits in case git decides just 7 is ambiguous. Accordingly,
-    # try the longer one first since it is more specific than the other.
-    for abbreviated in (expected[:8], expected[:7]):
-        if ('.g' + abbreviated) in version_info.release_string():
-            return True
-
-    return False
diff --git a/zaqar_upgradetests/post_test_hook.sh b/zaqar_upgradetests/post_test_hook.sh
deleted file mode 100755
index e69de29b..00000000
diff --git a/zaqar_upgradetests/pre_test_hook.sh b/zaqar_upgradetests/pre_test_hook.sh
deleted file mode 100755
index e69de29b..00000000
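
For completeness, a usage sketch for the verify_sha helper removed above, in the CI-style role its docstring describes (the SHA value is illustrative, not a real commit):

    import zaqar.version

    # Abbreviated commit object name; verify_sha() asserts at least
    # 7 hex digits. The value below is illustrative only.
    EXPECTED = '1234abcd'

    if not zaqar.version.verify_sha(EXPECTED):
        raise SystemExit('Installed zaqar is not the expected git build')
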